Merge V8 5.3.332.45.  DO NOT MERGE

Test: Manual

FPIIM-449

Change-Id: Id3254828b068abdea3cb10442e0172a8c9a98e03
(cherry picked from commit 13e2dadd00298019ed862f2b2fc5068bba730bcf)
diff --git a/src/accessors.cc b/src/accessors.cc
index 8b8753b..74238eb 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -86,23 +86,6 @@
 }
 
 
-bool Accessors::IsJSArrayBufferViewFieldAccessor(Handle<Map> map,
-                                                 Handle<Name> name,
-                                                 int* object_offset) {
-  DCHECK(name->IsUniqueName());
-  Isolate* isolate = name->GetIsolate();
-
-  switch (map->instance_type()) {
-    case JS_DATA_VIEW_TYPE:
-      return CheckForName(name, isolate->factory()->byte_length_string(),
-                          JSDataView::kByteLengthOffset, object_offset) ||
-             CheckForName(name, isolate->factory()->byte_offset_string(),
-                          JSDataView::kByteOffsetOffset, object_offset);
-    default:
-      return false;
-  }
-}
-
 namespace {
 
 MUST_USE_RESULT MaybeHandle<Object> ReplaceAccessorWithDataProperty(
@@ -588,7 +571,7 @@
   Handle<Script> script(
       Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
   Handle<Object> result = isolate->factory()->undefined_value();
-  if (!script->eval_from_shared()->IsUndefined()) {
+  if (!script->eval_from_shared()->IsUndefined(isolate)) {
     Handle<SharedFunctionInfo> eval_from_shared(
         SharedFunctionInfo::cast(script->eval_from_shared()));
     if (eval_from_shared->script()->IsScript()) {
@@ -654,11 +637,11 @@
   Handle<Script> script(
       Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
   Handle<Object> result = isolate->factory()->undefined_value();
-  if (!script->eval_from_shared()->IsUndefined()) {
+  if (!script->eval_from_shared()->IsUndefined(isolate)) {
     Handle<SharedFunctionInfo> shared(
         SharedFunctionInfo::cast(script->eval_from_shared()));
     // Find the name of the function calling eval.
-    if (!shared->name()->IsUndefined()) {
+    if (!shared->name()->IsUndefined(isolate)) {
       result = Handle<Object>(shared->name(), isolate);
     } else {
       result = Handle<Object>(shared->inferred_name(), isolate);
@@ -1149,7 +1132,7 @@
     return;
   }
   Object* value = context->get(slot);
-  if (value->IsTheHole()) {
+  if (value->IsTheHole(isolate)) {
     Handle<Name> name = v8::Utils::OpenHandle(*property);
 
     Handle<Object> exception = isolate->factory()->NewReferenceError(
diff --git a/src/accessors.h b/src/accessors.h
index 6a99934..7863c5a 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -95,14 +95,6 @@
   static bool IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
                                       int* object_offset);
 
-  // Returns true for properties that are accessors to ArrayBufferView and
-  // derived classes fields. If true, *object_offset contains offset of
-  // object field. The caller still has to check whether the underlying
-  // buffer was neutered.
-  static bool IsJSArrayBufferViewFieldAccessor(Handle<Map> map,
-                                               Handle<Name> name,
-                                               int* object_offset);
-
   static Handle<AccessorInfo> MakeAccessor(
       Isolate* isolate,
       Handle<Name> name,
diff --git a/src/address-map.cc b/src/address-map.cc
index 86558e0..61292bf 100644
--- a/src/address-map.cc
+++ b/src/address-map.cc
@@ -13,7 +13,7 @@
 RootIndexMap::RootIndexMap(Isolate* isolate) {
   map_ = isolate->root_index_map();
   if (map_ != NULL) return;
-  map_ = new HashMap(HashMap::PointersMatch);
+  map_ = new base::HashMap(base::HashMap::PointersMatch);
   for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
     Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
     Object* root = isolate->heap()->root(root_index);
@@ -22,7 +22,7 @@
     // not be referenced through the root list in the snapshot.
     if (isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
       HeapObject* heap_object = HeapObject::cast(root);
-      HashMap::Entry* entry = LookupEntry(map_, heap_object, false);
+      base::HashMap::Entry* entry = LookupEntry(map_, heap_object, false);
       if (entry != NULL) {
         // Some are initialized to a previous value in the root list.
         DCHECK_LT(GetValue(entry), i);
diff --git a/src/address-map.h b/src/address-map.h
index 017fc5d..ce21705 100644
--- a/src/address-map.h
+++ b/src/address-map.h
@@ -6,7 +6,7 @@
 #define V8_ADDRESS_MAP_H_
 
 #include "src/assert-scope.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
 #include "src/objects.h"
 
 namespace v8 {
@@ -14,16 +14,17 @@
 
 class AddressMapBase {
  protected:
-  static void SetValue(HashMap::Entry* entry, uint32_t v) {
+  static void SetValue(base::HashMap::Entry* entry, uint32_t v) {
     entry->value = reinterpret_cast<void*>(v);
   }
 
-  static uint32_t GetValue(HashMap::Entry* entry) {
+  static uint32_t GetValue(base::HashMap::Entry* entry) {
     return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
   }
 
-  inline static HashMap::Entry* LookupEntry(HashMap* map, HeapObject* obj,
-                                            bool insert) {
+  inline static base::HashMap::Entry* LookupEntry(base::HashMap* map,
+                                                  HeapObject* obj,
+                                                  bool insert) {
     if (insert) {
       map->LookupOrInsert(Key(obj), Hash(obj));
     }
@@ -47,13 +48,13 @@
   static const int kInvalidRootIndex = -1;
 
   int Lookup(HeapObject* obj) {
-    HashMap::Entry* entry = LookupEntry(map_, obj, false);
+    base::HashMap::Entry* entry = LookupEntry(map_, obj, false);
     if (entry) return GetValue(entry);
     return kInvalidRootIndex;
   }
 
  private:
-  HashMap* map_;
+  base::HashMap* map_;
 
   DISALLOW_COPY_AND_ASSIGN(RootIndexMap);
 };
@@ -180,18 +181,18 @@
  public:
   SerializerReferenceMap()
       : no_allocation_(),
-        map_(HashMap::PointersMatch),
+        map_(base::HashMap::PointersMatch),
         attached_reference_index_(0) {}
 
   SerializerReference Lookup(HeapObject* obj) {
-    HashMap::Entry* entry = LookupEntry(&map_, obj, false);
+    base::HashMap::Entry* entry = LookupEntry(&map_, obj, false);
     return entry ? SerializerReference(GetValue(entry)) : SerializerReference();
   }
 
   void Add(HeapObject* obj, SerializerReference b) {
     DCHECK(b.is_valid());
     DCHECK_NULL(LookupEntry(&map_, obj, false));
-    HashMap::Entry* entry = LookupEntry(&map_, obj, true);
+    base::HashMap::Entry* entry = LookupEntry(&map_, obj, true);
     SetValue(entry, b.bitfield_);
   }
 
@@ -204,7 +205,7 @@
 
  private:
   DisallowHeapAllocation no_allocation_;
-  HashMap map_;
+  base::HashMap map_;
   int attached_reference_index_;
   DISALLOW_COPY_AND_ASSIGN(SerializerReferenceMap);
 };
diff --git a/src/api-arguments-inl.h b/src/api-arguments-inl.h
new file mode 100644
index 0000000..89ac7de
--- /dev/null
+++ b/src/api-arguments-inl.h
@@ -0,0 +1,105 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/api-arguments.h"
+
+#include "src/tracing/trace-event.h"
+#include "src/vm-state-inl.h"
+
+namespace v8 {
+namespace internal {
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(F)                  \
+  F(AccessorNameGetterCallback, "get", v8::Value, Object)          \
+  F(GenericNamedPropertyQueryCallback, "has", v8::Integer, Object) \
+  F(GenericNamedPropertyDeleterCallback, "delete", v8::Boolean, Object)
+
+#define WRITE_CALL_1_NAME(Function, type, ApiReturn, InternalReturn)          \
+  Handle<InternalReturn> PropertyCallbackArguments::Call(Function f,          \
+                                                         Handle<Name> name) { \
+    Isolate* isolate = this->isolate();                                       \
+    RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function);        \
+    VMState<EXTERNAL> state(isolate);                                         \
+    ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));              \
+    PropertyCallbackInfo<ApiReturn> info(begin());                            \
+    LOG(isolate,                                                              \
+        ApiNamedPropertyAccess("interceptor-named-" type, holder(), *name));  \
+    f(v8::Utils::ToLocal(name), info);                                        \
+    return GetReturnValue<InternalReturn>(isolate);                           \
+  }
+
+FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(WRITE_CALL_1_NAME)
+
+#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME
+#undef WRITE_CALL_1_NAME
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(F)            \
+  F(IndexedPropertyGetterCallback, "get", v8::Value, Object)  \
+  F(IndexedPropertyQueryCallback, "has", v8::Integer, Object) \
+  F(IndexedPropertyDeleterCallback, "delete", v8::Boolean, Object)
+
+#define WRITE_CALL_1_INDEX(Function, type, ApiReturn, InternalReturn)      \
+  Handle<InternalReturn> PropertyCallbackArguments::Call(Function f,       \
+                                                         uint32_t index) { \
+    Isolate* isolate = this->isolate();                                    \
+    RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function);     \
+    VMState<EXTERNAL> state(isolate);                                      \
+    ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));           \
+    PropertyCallbackInfo<ApiReturn> info(begin());                         \
+    LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" type,     \
+                                          holder(), index));               \
+    f(index, info);                                                        \
+    return GetReturnValue<InternalReturn>(isolate);                        \
+  }
+
+FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(WRITE_CALL_1_INDEX)
+
+#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX
+#undef WRITE_CALL_1_INDEX
+
+Handle<Object> PropertyCallbackArguments::Call(
+    GenericNamedPropertySetterCallback f, Handle<Name> name,
+    Handle<Object> value) {
+  Isolate* isolate = this->isolate();
+  RuntimeCallTimerScope timer(
+      isolate, &RuntimeCallStats::GenericNamedPropertySetterCallback);
+  VMState<EXTERNAL> state(isolate);
+  ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+  PropertyCallbackInfo<v8::Value> info(begin());
+  LOG(isolate,
+      ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
+  f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
+  return GetReturnValue<Object>(isolate);
+}
+
+Handle<Object> PropertyCallbackArguments::Call(IndexedPropertySetterCallback f,
+                                               uint32_t index,
+                                               Handle<Object> value) {
+  Isolate* isolate = this->isolate();
+  RuntimeCallTimerScope timer(isolate,
+                              &RuntimeCallStats::IndexedPropertySetterCallback);
+  VMState<EXTERNAL> state(isolate);
+  ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+  PropertyCallbackInfo<v8::Value> info(begin());
+  LOG(isolate,
+      ApiIndexedPropertyAccess("interceptor-indexed-set", holder(), index));
+  f(index, v8::Utils::ToLocal(value), info);
+  return GetReturnValue<Object>(isolate);
+}
+
+void PropertyCallbackArguments::Call(AccessorNameSetterCallback f,
+                                     Handle<Name> name, Handle<Object> value) {
+  Isolate* isolate = this->isolate();
+  RuntimeCallTimerScope timer(isolate,
+                              &RuntimeCallStats::AccessorNameSetterCallback);
+  VMState<EXTERNAL> state(isolate);
+  ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+  PropertyCallbackInfo<void> info(begin());
+  LOG(isolate,
+      ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
+  f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/api-arguments.cc b/src/api-arguments.cc
index 71a0f60..f8d6c8f 100644
--- a/src/api-arguments.cc
+++ b/src/api-arguments.cc
@@ -4,6 +4,9 @@
 
 #include "src/api-arguments.h"
 
+#include "src/tracing/trace-event.h"
+#include "src/vm-state-inl.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/api-arguments.h b/src/api-arguments.h
index 57a2d98..0dfe618 100644
--- a/src/api-arguments.h
+++ b/src/api-arguments.h
@@ -7,8 +7,6 @@
 
 #include "src/api.h"
 #include "src/isolate.h"
-#include "src/tracing/trace-event.h"
-#include "src/vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -58,7 +56,7 @@
   // Check the ReturnValue.
   Object** handle = &this->begin()[kReturnValueOffset];
   // Nothing was set, return empty handle as per previous behaviour.
-  if ((*handle)->IsTheHole()) return Handle<V>();
+  if ((*handle)->IsTheHole(isolate)) return Handle<V>();
   Handle<V> result = Handle<V>::cast(Handle<Object>(handle));
   result->VerifyApiCallResultType();
   return result;
@@ -108,92 +106,24 @@
  */
   Handle<JSObject> Call(IndexedPropertyEnumeratorCallback f);
 
-#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(F)                  \
-  F(AccessorNameGetterCallback, "get", v8::Value, Object)          \
-  F(GenericNamedPropertyQueryCallback, "has", v8::Integer, Object) \
-  F(GenericNamedPropertyDeleterCallback, "delete", v8::Boolean, Object)
+  inline Handle<Object> Call(AccessorNameGetterCallback f, Handle<Name> name);
+  inline Handle<Object> Call(GenericNamedPropertyQueryCallback f,
+                             Handle<Name> name);
+  inline Handle<Object> Call(GenericNamedPropertyDeleterCallback f,
+                             Handle<Name> name);
 
-#define WRITE_CALL_1_NAME(Function, type, ApiReturn, InternalReturn)         \
-  Handle<InternalReturn> Call(Function f, Handle<Name> name) {               \
-    Isolate* isolate = this->isolate();                                      \
-    RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function);       \
-    VMState<EXTERNAL> state(isolate);                                        \
-    ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));             \
-    PropertyCallbackInfo<ApiReturn> info(begin());                           \
-    LOG(isolate,                                                             \
-        ApiNamedPropertyAccess("interceptor-named-" type, holder(), *name)); \
-    f(v8::Utils::ToLocal(name), info);                                       \
-    return GetReturnValue<InternalReturn>(isolate);                          \
-  }
+  inline Handle<Object> Call(IndexedPropertyGetterCallback f, uint32_t index);
+  inline Handle<Object> Call(IndexedPropertyQueryCallback f, uint32_t index);
+  inline Handle<Object> Call(IndexedPropertyDeleterCallback f, uint32_t index);
 
-  FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(WRITE_CALL_1_NAME)
+  inline Handle<Object> Call(GenericNamedPropertySetterCallback f,
+                             Handle<Name> name, Handle<Object> value);
 
-#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME
-#undef WRITE_CALL_1_NAME
+  inline Handle<Object> Call(IndexedPropertySetterCallback f, uint32_t index,
+                             Handle<Object> value);
 
-#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(F)            \
-  F(IndexedPropertyGetterCallback, "get", v8::Value, Object)  \
-  F(IndexedPropertyQueryCallback, "has", v8::Integer, Object) \
-  F(IndexedPropertyDeleterCallback, "delete", v8::Boolean, Object)
-
-#define WRITE_CALL_1_INDEX(Function, type, ApiReturn, InternalReturn)  \
-  Handle<InternalReturn> Call(Function f, uint32_t index) {            \
-    Isolate* isolate = this->isolate();                                \
-    RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function); \
-    VMState<EXTERNAL> state(isolate);                                  \
-    ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));       \
-    PropertyCallbackInfo<ApiReturn> info(begin());                     \
-    LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" type, \
-                                          holder(), index));           \
-    f(index, info);                                                    \
-    return GetReturnValue<InternalReturn>(isolate);                    \
-  }
-
-  FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(WRITE_CALL_1_INDEX)
-
-#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX
-#undef WRITE_CALL_1_INDEX
-
-  Handle<Object> Call(GenericNamedPropertySetterCallback f, Handle<Name> name,
-                      Handle<Object> value) {
-    Isolate* isolate = this->isolate();
-    RuntimeCallTimerScope timer(
-        isolate, &RuntimeCallStats::GenericNamedPropertySetterCallback);
-    VMState<EXTERNAL> state(isolate);
-    ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
-    PropertyCallbackInfo<v8::Value> info(begin());
-    LOG(isolate,
-        ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
-    f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
-    return GetReturnValue<Object>(isolate);
-  }
-
-  Handle<Object> Call(IndexedPropertySetterCallback f, uint32_t index,
-                      Handle<Object> value) {
-    Isolate* isolate = this->isolate();
-    RuntimeCallTimerScope timer(
-        isolate, &RuntimeCallStats::IndexedPropertySetterCallback);
-    VMState<EXTERNAL> state(isolate);
-    ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
-    PropertyCallbackInfo<v8::Value> info(begin());
-    LOG(isolate,
-        ApiIndexedPropertyAccess("interceptor-indexed-set", holder(), index));
-    f(index, v8::Utils::ToLocal(value), info);
-    return GetReturnValue<Object>(isolate);
-  }
-
-  void Call(AccessorNameSetterCallback f, Handle<Name> name,
-            Handle<Object> value) {
-    Isolate* isolate = this->isolate();
-    RuntimeCallTimerScope timer(isolate,
-                                &RuntimeCallStats::AccessorNameSetterCallback);
-    VMState<EXTERNAL> state(isolate);
-    ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
-    PropertyCallbackInfo<void> info(begin());
-    LOG(isolate,
-        ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
-    f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
-  }
+  inline void Call(AccessorNameSetterCallback f, Handle<Name> name,
+                   Handle<Object> value);
 
  private:
   inline JSObject* holder() {
diff --git a/src/api-natives.cc b/src/api-natives.cc
index fcd19cc..f09f42d 100644
--- a/src/api-natives.cc
+++ b/src/api-natives.cc
@@ -156,20 +156,25 @@
 // Returns parent function template or null.
 FunctionTemplateInfo* GetParent(FunctionTemplateInfo* data) {
   Object* parent = data->parent_template();
-  return parent->IsUndefined() ? nullptr : FunctionTemplateInfo::cast(parent);
+  return parent->IsUndefined(data->GetIsolate())
+             ? nullptr
+             : FunctionTemplateInfo::cast(parent);
 }
 
 // Starting from given object template's constructor walk up the inheritance
 // chain till a function template that has an instance template is found.
 ObjectTemplateInfo* GetParent(ObjectTemplateInfo* data) {
   Object* maybe_ctor = data->constructor();
-  if (maybe_ctor->IsUndefined()) return nullptr;
+  Isolate* isolate = data->GetIsolate();
+  if (maybe_ctor->IsUndefined(isolate)) return nullptr;
   FunctionTemplateInfo* ctor = FunctionTemplateInfo::cast(maybe_ctor);
   while (true) {
     ctor = GetParent(ctor);
     if (ctor == nullptr) return nullptr;
     Object* maybe_obj = ctor->instance_template();
-    if (!maybe_obj->IsUndefined()) return ObjectTemplateInfo::cast(maybe_obj);
+    if (!maybe_obj->IsUndefined(isolate)) {
+      return ObjectTemplateInfo::cast(maybe_obj);
+    }
   }
 }
 
@@ -185,9 +190,9 @@
   int max_number_of_properties = 0;
   TemplateInfoT* info = *data;
   while (info != nullptr) {
-    if (!info->property_accessors()->IsUndefined()) {
+    if (!info->property_accessors()->IsUndefined(isolate)) {
       Object* props = info->property_accessors();
-      if (!props->IsUndefined()) {
+      if (!props->IsUndefined(isolate)) {
         Handle<Object> props_handle(props, isolate);
         NeanderArray props_array(props_handle);
         max_number_of_properties += props_array.length();
@@ -205,7 +210,7 @@
     info = *data;
     while (info != nullptr) {
       // Accumulate accessors.
-      if (!info->property_accessors()->IsUndefined()) {
+      if (!info->property_accessors()->IsUndefined(isolate)) {
         Handle<Object> props(info->property_accessors(), isolate);
         valid_descriptors =
             AccessorInfo::AppendUnique(props, array, valid_descriptors);
@@ -221,7 +226,7 @@
   }
 
   auto property_list = handle(data->property_list(), isolate);
-  if (property_list->IsUndefined()) return obj;
+  if (property_list->IsUndefined(isolate)) return obj;
   // TODO(dcarney): just use a FixedArray here.
   NeanderArray properties(property_list);
   if (properties.length() == 0) return obj;
@@ -282,7 +287,7 @@
   Handle<Object> result =
       UnseededNumberDictionary::DeleteProperty(cache, entry);
   USE(result);
-  DCHECK(result->IsTrue());
+  DCHECK(result->IsTrue(isolate));
   auto new_cache = UnseededNumberDictionary::Shrink(cache, entry);
   isolate->native_context()->set_template_instantiations_cache(*new_cache);
 }
@@ -323,7 +328,7 @@
 
   if (constructor.is_null()) {
     Handle<Object> cons(info->constructor(), isolate);
-    if (cons->IsUndefined()) {
+    if (cons->IsUndefined(isolate)) {
       constructor = isolate->object_function();
     } else {
       auto cons_templ = Handle<FunctionTemplateInfo>::cast(cons);
@@ -371,7 +376,7 @@
   Handle<JSObject> prototype;
   if (!data->remove_prototype()) {
     auto prototype_templ = handle(data->prototype_template(), isolate);
-    if (prototype_templ->IsUndefined()) {
+    if (prototype_templ->IsUndefined(isolate)) {
       prototype = isolate->factory()->NewJSObject(isolate->object_function());
     } else {
       ASSIGN_RETURN_ON_EXCEPTION(
@@ -382,7 +387,7 @@
           JSFunction);
     }
     auto parent = handle(data->parent_template(), isolate);
-    if (!parent->IsUndefined()) {
+    if (!parent->IsUndefined(isolate)) {
       Handle<JSFunction> parent_instance;
       ASSIGN_RETURN_ON_EXCEPTION(
           isolate, parent_instance,
@@ -445,7 +450,7 @@
 void AddPropertyToPropertyList(Isolate* isolate, Handle<TemplateInfo> templ,
                                int length, Handle<Object>* data) {
   auto list = handle(templ->property_list(), isolate);
-  if (list->IsUndefined()) {
+  if (list->IsUndefined(isolate)) {
     list = NeanderArray(isolate).value();
     templ->set_property_list(*list);
   }
@@ -520,7 +525,7 @@
                                        Handle<TemplateInfo> info,
                                        Handle<AccessorInfo> property) {
   auto list = handle(info->property_accessors(), isolate);
-  if (list->IsUndefined()) {
+  if (list->IsUndefined(isolate)) {
     list = NeanderArray(isolate).value();
     info->set_property_accessors(*list);
   }
@@ -532,95 +537,77 @@
 Handle<JSFunction> ApiNatives::CreateApiFunction(
     Isolate* isolate, Handle<FunctionTemplateInfo> obj,
     Handle<Object> prototype, ApiInstanceType instance_type) {
-  Handle<Code> code;
-  if (obj->call_code()->IsCallHandlerInfo() &&
-      CallHandlerInfo::cast(obj->call_code())->fast_handler()->IsCode()) {
-    code = isolate->builtins()->HandleFastApiCall();
-  } else {
-    code = isolate->builtins()->HandleApiCall();
-  }
-  Handle<Code> construct_stub =
-      prototype.is_null() ? isolate->builtins()->ConstructedNonConstructable()
-                          : isolate->builtins()->JSConstructStubApi();
-
-  obj->set_instantiated(true);
-  Handle<JSFunction> result;
-  if (obj->remove_prototype()) {
-    result = isolate->factory()->NewFunctionWithoutPrototype(
-        isolate->factory()->empty_string(), code);
-  } else {
-    int internal_field_count = 0;
-    if (!obj->instance_template()->IsUndefined()) {
-      Handle<ObjectTemplateInfo> instance_template = Handle<ObjectTemplateInfo>(
-          ObjectTemplateInfo::cast(obj->instance_template()));
-      internal_field_count =
-          Smi::cast(instance_template->internal_field_count())->value();
-    }
-
-    // TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing
-    // JSObject::GetHeaderSize.
-    int instance_size = kPointerSize * internal_field_count;
-    InstanceType type;
-    switch (instance_type) {
-      case JavaScriptObjectType:
-        if (!obj->needs_access_check() &&
-            obj->named_property_handler()->IsUndefined() &&
-            obj->indexed_property_handler()->IsUndefined()) {
-          type = JS_API_OBJECT_TYPE;
-        } else {
-          type = JS_SPECIAL_API_OBJECT_TYPE;
-        }
-        instance_size += JSObject::kHeaderSize;
-        break;
-      case GlobalObjectType:
-        type = JS_GLOBAL_OBJECT_TYPE;
-        instance_size += JSGlobalObject::kSize;
-        break;
-      case GlobalProxyType:
-        type = JS_GLOBAL_PROXY_TYPE;
-        instance_size += JSGlobalProxy::kSize;
-        break;
-      default:
-        UNREACHABLE();
-        type = JS_OBJECT_TYPE;  // Keep the compiler happy.
-        break;
-    }
-
-    result = isolate->factory()->NewFunction(
-        isolate->factory()->empty_string(), code, prototype, type,
-        instance_size, obj->read_only_prototype(), true);
-  }
-
-  result->shared()->set_length(obj->length());
-  Handle<Object> class_name(obj->class_name(), isolate);
-  if (class_name->IsString()) {
-    result->shared()->set_instance_class_name(*class_name);
-    result->shared()->set_name(*class_name);
-  }
-  result->shared()->set_api_func_data(*obj);
-  result->shared()->set_construct_stub(*construct_stub);
-  result->shared()->DontAdaptArguments();
+  Handle<SharedFunctionInfo> shared =
+      FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, obj);
+  Handle<JSFunction> result =
+      isolate->factory()->NewFunctionFromSharedFunctionInfo(
+          shared, isolate->native_context());
 
   if (obj->remove_prototype()) {
+    result->set_map(*isolate->sloppy_function_without_prototype_map());
+    DCHECK(prototype.is_null());
     DCHECK(result->shared()->IsApiFunction());
     DCHECK(!result->has_initial_map());
     DCHECK(!result->has_prototype());
+    DCHECK(!result->IsConstructor());
     return result;
   }
 
-#ifdef DEBUG
-  LookupIterator it(handle(JSObject::cast(result->prototype())),
-                    isolate->factory()->constructor_string(),
-                    LookupIterator::OWN_SKIP_INTERCEPTOR);
-  MaybeHandle<Object> maybe_prop = Object::GetProperty(&it);
-  DCHECK(it.IsFound());
-  DCHECK(maybe_prop.ToHandleChecked().is_identical_to(result));
-#endif
-
   // Down from here is only valid for API functions that can be used as a
   // constructor (don't set the "remove prototype" flag).
 
-  Handle<Map> map(result->initial_map());
+  if (obj->read_only_prototype()) {
+    result->set_map(*isolate->sloppy_function_with_readonly_prototype_map());
+  }
+
+  if (prototype->IsTheHole(isolate)) {
+    prototype = isolate->factory()->NewFunctionPrototype(result);
+  } else {
+    JSObject::AddProperty(Handle<JSObject>::cast(prototype),
+                          isolate->factory()->constructor_string(), result,
+                          DONT_ENUM);
+  }
+
+  int internal_field_count = 0;
+  if (!obj->instance_template()->IsUndefined(isolate)) {
+    Handle<ObjectTemplateInfo> instance_template = Handle<ObjectTemplateInfo>(
+        ObjectTemplateInfo::cast(obj->instance_template()));
+    internal_field_count =
+        Smi::cast(instance_template->internal_field_count())->value();
+  }
+
+  // TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing
+  // JSObject::GetHeaderSize.
+  int instance_size = kPointerSize * internal_field_count;
+  InstanceType type;
+  switch (instance_type) {
+    case JavaScriptObjectType:
+      if (!obj->needs_access_check() &&
+          obj->named_property_handler()->IsUndefined(isolate) &&
+          obj->indexed_property_handler()->IsUndefined(isolate)) {
+        type = JS_API_OBJECT_TYPE;
+      } else {
+        type = JS_SPECIAL_API_OBJECT_TYPE;
+      }
+      instance_size += JSObject::kHeaderSize;
+      break;
+    case GlobalObjectType:
+      type = JS_GLOBAL_OBJECT_TYPE;
+      instance_size += JSGlobalObject::kSize;
+      break;
+    case GlobalProxyType:
+      type = JS_GLOBAL_PROXY_TYPE;
+      instance_size += JSGlobalProxy::kSize;
+      break;
+    default:
+      UNREACHABLE();
+      type = JS_OBJECT_TYPE;  // Keep the compiler happy.
+      break;
+  }
+
+  Handle<Map> map =
+      isolate->factory()->NewMap(type, instance_size, FAST_HOLEY_SMI_ELEMENTS);
+  JSFunction::SetInitialMap(result, map, Handle<JSObject>::cast(prototype));
 
   // Mark as undetectable if needed.
   if (obj->undetectable()) {
@@ -633,20 +620,19 @@
   }
 
   // Set interceptor information in the map.
-  if (!obj->named_property_handler()->IsUndefined()) {
+  if (!obj->named_property_handler()->IsUndefined(isolate)) {
     map->set_has_named_interceptor();
   }
-  if (!obj->indexed_property_handler()->IsUndefined()) {
+  if (!obj->indexed_property_handler()->IsUndefined(isolate)) {
     map->set_has_indexed_interceptor();
   }
 
   // Mark instance as callable in the map.
-  if (!obj->instance_call_handler()->IsUndefined()) {
+  if (!obj->instance_call_handler()->IsUndefined(isolate)) {
     map->set_is_callable();
     map->set_is_constructor(true);
   }
 
-  DCHECK(result->shared()->IsApiFunction());
   return result;
 }
 
diff --git a/src/api.cc b/src/api.cc
index f757d1d..04d8cb3 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -15,6 +15,7 @@
 #include "include/v8-experimental.h"
 #include "include/v8-profiler.h"
 #include "include/v8-testing.h"
+#include "include/v8-util.h"
 #include "src/accessors.h"
 #include "src/api-experimental.h"
 #include "src/api-natives.h"
@@ -40,6 +41,7 @@
 #include "src/icu_util.h"
 #include "src/isolate-inl.h"
 #include "src/json-parser.h"
+#include "src/json-stringifier.h"
 #include "src/messages.h"
 #include "src/parsing/parser.h"
 #include "src/parsing/scanner-character-streams.h"
@@ -382,91 +384,159 @@
   return true;
 }
 
-StartupData SerializeIsolateAndContext(
-    Isolate* isolate, Persistent<Context>* context,
-    i::Snapshot::Metadata metadata,
-    i::StartupSerializer::FunctionCodeHandling function_code_handling) {
-  if (context->IsEmpty()) return {NULL, 0};
+struct SnapshotCreatorData {
+  explicit SnapshotCreatorData(Isolate* isolate)
+      : isolate_(isolate),
+        contexts_(isolate),
+        templates_(isolate),
+        created_(false) {}
 
-  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  static SnapshotCreatorData* cast(void* data) {
+    return reinterpret_cast<SnapshotCreatorData*>(data);
+  }
+
+  ArrayBufferAllocator allocator_;
+  Isolate* isolate_;
+  PersistentValueVector<Context> contexts_;
+  PersistentValueVector<Template> templates_;
+  bool created_;
+};
+
+}  // namespace
+
+SnapshotCreator::SnapshotCreator(intptr_t* external_references,
+                                 StartupData* existing_snapshot) {
+  i::Isolate* internal_isolate = new i::Isolate(true);
+  Isolate* isolate = reinterpret_cast<Isolate*>(internal_isolate);
+  SnapshotCreatorData* data = new SnapshotCreatorData(isolate);
+  data->isolate_ = isolate;
+  internal_isolate->set_array_buffer_allocator(&data->allocator_);
+  internal_isolate->set_api_external_references(external_references);
+  isolate->Enter();
+  if (existing_snapshot) {
+    internal_isolate->set_snapshot_blob(existing_snapshot);
+    i::Snapshot::Initialize(internal_isolate);
+  } else {
+    internal_isolate->Init(nullptr);
+  }
+  data_ = data;
+}
+
+SnapshotCreator::~SnapshotCreator() {
+  SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
+  DCHECK(data->created_);
+  Isolate* isolate = data->isolate_;
+  isolate->Exit();
+  isolate->Dispose();
+  delete data;
+}
+
+Isolate* SnapshotCreator::GetIsolate() {
+  return SnapshotCreatorData::cast(data_)->isolate_;
+}
+
+size_t SnapshotCreator::AddContext(Local<Context> context) {
+  DCHECK(!context.IsEmpty());
+  SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
+  DCHECK(!data->created_);
+  Isolate* isolate = data->isolate_;
+  CHECK_EQ(isolate, context->GetIsolate());
+  size_t index = static_cast<int>(data->contexts_.Size());
+  data->contexts_.Append(context);
+  return index;
+}
+
+size_t SnapshotCreator::AddTemplate(Local<Template> template_obj) {
+  DCHECK(!template_obj.IsEmpty());
+  SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
+  DCHECK(!data->created_);
+  DCHECK_EQ(reinterpret_cast<i::Isolate*>(data->isolate_),
+            Utils::OpenHandle(*template_obj)->GetIsolate());
+  size_t index = static_cast<int>(data->templates_.Size());
+  data->templates_.Append(template_obj);
+  return index;
+}
+
+StartupData SnapshotCreator::CreateBlob(
+    SnapshotCreator::FunctionCodeHandling function_code_handling) {
+  SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(data->isolate_);
+  DCHECK(!data->created_);
+
+  {
+    int num_templates = static_cast<int>(data->templates_.Size());
+    i::HandleScope scope(isolate);
+    i::Handle<i::FixedArray> templates =
+        isolate->factory()->NewFixedArray(num_templates, i::TENURED);
+    for (int i = 0; i < num_templates; i++) {
+      templates->set(i, *v8::Utils::OpenHandle(*data->templates_.Get(i)));
+    }
+    isolate->heap()->SetSerializedTemplates(*templates);
+    data->templates_.Clear();
+  }
 
   // If we don't do this then we end up with a stray root pointing at the
   // context even after we have disposed of the context.
-  internal_isolate->heap()->CollectAllAvailableGarbage("mksnapshot");
+  isolate->heap()->CollectAllAvailableGarbage("mksnapshot");
+  isolate->heap()->CompactWeakFixedArrays();
 
-  // GC may have cleared weak cells, so compact any WeakFixedArrays
-  // found on the heap.
-  i::HeapIterator iterator(internal_isolate->heap(),
-                           i::HeapIterator::kFilterUnreachable);
-  for (i::HeapObject* o = iterator.next(); o != NULL; o = iterator.next()) {
-    if (o->IsPrototypeInfo()) {
-      i::Object* prototype_users = i::PrototypeInfo::cast(o)->prototype_users();
-      if (prototype_users->IsWeakFixedArray()) {
-        i::WeakFixedArray* array = i::WeakFixedArray::cast(prototype_users);
-        array->Compact<i::JSObject::PrototypeRegistryCompactionCallback>();
-      }
-    } else if (o->IsScript()) {
-      i::Object* shared_list = i::Script::cast(o)->shared_function_infos();
-      if (shared_list->IsWeakFixedArray()) {
-        i::WeakFixedArray* array = i::WeakFixedArray::cast(shared_list);
-        array->Compact<i::WeakFixedArray::NullCallback>();
-      }
-    }
+  i::DisallowHeapAllocation no_gc_from_here_on;
+
+  int num_contexts = static_cast<int>(data->contexts_.Size());
+  i::List<i::Object*> contexts(num_contexts);
+  for (int i = 0; i < num_contexts; i++) {
+    i::HandleScope scope(isolate);
+    i::Handle<i::Context> context =
+        v8::Utils::OpenHandle(*data->contexts_.Get(i));
+    contexts.Add(*context);
+  }
+  data->contexts_.Clear();
+
+  i::StartupSerializer startup_serializer(isolate, function_code_handling);
+  startup_serializer.SerializeStrongReferences();
+
+  // Serialize each context with a new partial serializer.
+  i::List<i::SnapshotData*> context_snapshots(num_contexts);
+  for (int i = 0; i < num_contexts; i++) {
+    i::PartialSerializer partial_serializer(isolate, &startup_serializer);
+    partial_serializer.Serialize(&contexts[i]);
+    context_snapshots.Add(new i::SnapshotData(&partial_serializer));
   }
 
-  i::Object* raw_context = *v8::Utils::OpenPersistent(*context);
-  context->Reset();
+  startup_serializer.SerializeWeakReferencesAndDeferred();
+  i::SnapshotData startup_snapshot(&startup_serializer);
+  StartupData result =
+      i::Snapshot::CreateSnapshotBlob(&startup_snapshot, &context_snapshots);
 
-  i::SnapshotByteSink snapshot_sink;
-  i::StartupSerializer ser(internal_isolate, &snapshot_sink,
-                           function_code_handling);
-  ser.SerializeStrongReferences();
-
-  i::SnapshotByteSink context_sink;
-  i::PartialSerializer context_ser(internal_isolate, &ser, &context_sink);
-  context_ser.Serialize(&raw_context);
-  ser.SerializeWeakReferencesAndDeferred();
-
-  return i::Snapshot::CreateSnapshotBlob(ser, context_ser, metadata);
+  // Delete heap-allocated context snapshot instances.
+  for (const auto& context_snapshot : context_snapshots) {
+    delete context_snapshot;
+  }
+  data->created_ = true;
+  return result;
 }
 
-}  // namespace
-
 StartupData V8::CreateSnapshotDataBlob(const char* embedded_source) {
   // Create a new isolate and a new context from scratch, optionally run
   // a script to embed, and serialize to create a snapshot blob.
-  StartupData result = {NULL, 0};
-
+  StartupData result = {nullptr, 0};
   base::ElapsedTimer timer;
   timer.Start();
-
-  ArrayBufferAllocator allocator;
-  i::Isolate* internal_isolate = new i::Isolate(true);
-  internal_isolate->set_array_buffer_allocator(&allocator);
-  Isolate* isolate = reinterpret_cast<Isolate*>(internal_isolate);
-
   {
-    Isolate::Scope isolate_scope(isolate);
-    internal_isolate->Init(NULL);
-    Persistent<Context> context;
+    SnapshotCreator snapshot_creator;
+    Isolate* isolate = snapshot_creator.GetIsolate();
     {
-      HandleScope handle_scope(isolate);
-      Local<Context> new_context = Context::New(isolate);
-      context.Reset(isolate, new_context);
+      HandleScope scope(isolate);
+      Local<Context> context = Context::New(isolate);
       if (embedded_source != NULL &&
-          !RunExtraCode(isolate, new_context, embedded_source, "<embedded>")) {
-        context.Reset();
+          !RunExtraCode(isolate, context, embedded_source, "<embedded>")) {
+        return result;
       }
+      snapshot_creator.AddContext(context);
     }
-
-    i::Snapshot::Metadata metadata;
-    metadata.set_embeds_script(embedded_source != NULL);
-
-    result = SerializeIsolateAndContext(
-        isolate, &context, metadata, i::StartupSerializer::CLEAR_FUNCTION_CODE);
-    DCHECK(context.IsEmpty());
+    result = snapshot_creator.CreateBlob(
+        SnapshotCreator::FunctionCodeHandling::kClear);
   }
-  isolate->Dispose();
 
   if (i::FLAG_profile_deserialization) {
     i::PrintF("Creating snapshot took %0.3f ms\n",
@@ -486,42 +556,28 @@
   //    compilation of executed functions.
   //  - Create a new context. This context will be unpolluted.
   //  - Serialize the isolate and the second context into a new snapshot blob.
-  StartupData result = {NULL, 0};
-
+  StartupData result = {nullptr, 0};
   base::ElapsedTimer timer;
   timer.Start();
-
-  ArrayBufferAllocator allocator;
-  i::Isolate* internal_isolate = new i::Isolate(true);
-  internal_isolate->set_array_buffer_allocator(&allocator);
-  internal_isolate->set_snapshot_blob(&cold_snapshot_blob);
-  Isolate* isolate = reinterpret_cast<Isolate*>(internal_isolate);
-
   {
-    Isolate::Scope isolate_scope(isolate);
-    i::Snapshot::Initialize(internal_isolate);
-    Persistent<Context> context;
-    bool success;
+    SnapshotCreator snapshot_creator(nullptr, &cold_snapshot_blob);
+    Isolate* isolate = snapshot_creator.GetIsolate();
+    {
+      HandleScope scope(isolate);
+      Local<Context> context = Context::New(isolate);
+      if (!RunExtraCode(isolate, context, warmup_source, "<warm-up>")) {
+        return result;
+      }
+    }
     {
       HandleScope handle_scope(isolate);
-      Local<Context> new_context = Context::New(isolate);
-      success = RunExtraCode(isolate, new_context, warmup_source, "<warm-up>");
-    }
-    if (success) {
-      HandleScope handle_scope(isolate);
       isolate->ContextDisposedNotification(false);
-      Local<Context> new_context = Context::New(isolate);
-      context.Reset(isolate, new_context);
+      Local<Context> context = Context::New(isolate);
+      snapshot_creator.AddContext(context);
     }
-
-    i::Snapshot::Metadata metadata;
-    metadata.set_embeds_script(i::Snapshot::EmbedsScript(internal_isolate));
-
-    result = SerializeIsolateAndContext(
-        isolate, &context, metadata, i::StartupSerializer::KEEP_FUNCTION_CODE);
-    DCHECK(context.IsEmpty());
+    result = snapshot_creator.CreateBlob(
+        SnapshotCreator::FunctionCodeHandling::kKeep);
   }
-  isolate->Dispose();
 
   if (i::FLAG_profile_deserialization) {
     i::PrintF("Warming up snapshot took %0.3f ms\n",
@@ -811,9 +867,8 @@
 
 i::Object** EscapableHandleScope::Escape(i::Object** escape_value) {
   i::Heap* heap = reinterpret_cast<i::Isolate*>(GetIsolate())->heap();
-  Utils::ApiCheck(*escape_slot_ == heap->the_hole_value(),
-                  "EscapeableHandleScope::Escape",
-                  "Escape value set twice");
+  Utils::ApiCheck((*escape_slot_)->IsTheHole(heap->isolate()),
+                  "EscapeableHandleScope::Escape", "Escape value set twice");
   if (escape_value == NULL) {
     *escape_slot_ = heap->undefined_value();
     return NULL;
@@ -1074,7 +1129,7 @@
   ENTER_V8(i_isolate);
   i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template(),
                               i_isolate);
-  if (result->IsUndefined()) {
+  if (result->IsUndefined(i_isolate)) {
     // Do not cache prototype objects.
     result = Utils::OpenHandle(
         *ObjectTemplateNew(i_isolate, Local<FunctionTemplate>(), true));
@@ -1112,8 +1167,7 @@
   obj->set_do_not_cache(do_not_cache);
   int next_serial_number = 0;
   if (!do_not_cache) {
-    next_serial_number = isolate->next_serial_number() + 1;
-    isolate->set_next_serial_number(next_serial_number);
+    next_serial_number = isolate->heap()->GetNextTemplateSerialNumber();
   }
   obj->set_serial_number(i::Smi::FromInt(next_serial_number));
   if (callback != 0) {
@@ -1138,7 +1192,6 @@
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   // Changes to the environment cannot be captured in the snapshot. Expect no
   // function templates when the isolate is created for serialization.
-  DCHECK(!i_isolate->serializer_enabled());
   LOG_API(i_isolate, FunctionTemplate, New);
   ENTER_V8(i_isolate);
   auto templ = FunctionTemplateNew(i_isolate, callback, nullptr, data,
@@ -1147,6 +1200,20 @@
   return templ;
 }
 
+Local<FunctionTemplate> FunctionTemplate::FromSnapshot(Isolate* isolate,
+                                                       size_t index) {
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  i::FixedArray* templates = i_isolate->heap()->serialized_templates();
+  int int_index = static_cast<int>(index);
+  if (int_index < templates->length()) {
+    i::Object* info = i_isolate->heap()->serialized_templates()->get(int_index);
+    if (info->IsFunctionTemplateInfo()) {
+      return Utils::ToLocal(i::Handle<i::FunctionTemplateInfo>(
+          i::FunctionTemplateInfo::cast(info)));
+    }
+  }
+  return Local<FunctionTemplate>();
+}
 
 Local<FunctionTemplate> FunctionTemplate::NewWithFastHandler(
     Isolate* isolate, FunctionCallback callback,
@@ -1254,7 +1321,7 @@
   }
   i::Isolate* isolate = handle->GetIsolate();
   ENTER_V8(isolate);
-  if (handle->instance_template()->IsUndefined()) {
+  if (handle->instance_template()->IsUndefined(isolate)) {
     Local<ObjectTemplate> templ =
         ObjectTemplate::New(isolate, ToApiHandle<FunctionTemplate>(handle));
     handle->set_instance_template(*Utils::OpenHandle(*templ));
@@ -1335,9 +1402,6 @@
 static Local<ObjectTemplate> ObjectTemplateNew(
     i::Isolate* isolate, v8::Local<FunctionTemplate> constructor,
     bool do_not_cache) {
-  // Changes to the environment cannot be captured in the snapshot. Expect no
-  // object templates when the isolate is created for serialization.
-  DCHECK(!isolate->serializer_enabled());
   LOG_API(isolate, ObjectTemplate, New);
   ENTER_V8(isolate);
   i::Handle<i::Struct> struct_obj =
@@ -1347,8 +1411,7 @@
   InitializeTemplate(obj, Consts::OBJECT_TEMPLATE);
   int next_serial_number = 0;
   if (!do_not_cache) {
-    next_serial_number = isolate->next_serial_number() + 1;
-    isolate->set_next_serial_number(next_serial_number);
+    next_serial_number = isolate->heap()->GetNextTemplateSerialNumber();
   }
   obj->set_serial_number(i::Smi::FromInt(next_serial_number));
   if (!constructor.IsEmpty())
@@ -1362,13 +1425,28 @@
   return ObjectTemplateNew(isolate, constructor, false);
 }
 
+Local<ObjectTemplate> ObjectTemplate::FromSnapshot(Isolate* isolate,
+                                                   size_t index) {
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  i::FixedArray* templates = i_isolate->heap()->serialized_templates();
+  int int_index = static_cast<int>(index);
+  if (int_index < templates->length()) {
+    i::Object* info = i_isolate->heap()->serialized_templates()->get(int_index);
+    if (info->IsObjectTemplateInfo()) {
+      return Utils::ToLocal(
+          i::Handle<i::ObjectTemplateInfo>(i::ObjectTemplateInfo::cast(info)));
+    }
+  }
+  return Local<ObjectTemplate>();
+}
+
 // Ensure that the object template has a constructor.  If no
 // constructor is available we create one.
 static i::Handle<i::FunctionTemplateInfo> EnsureConstructor(
     i::Isolate* isolate,
     ObjectTemplate* object_template) {
   i::Object* obj = Utils::OpenHandle(object_template)->constructor();
-  if (!obj ->IsUndefined()) {
+  if (!obj->IsUndefined(isolate)) {
     i::FunctionTemplateInfo* info = i::FunctionTemplateInfo::cast(obj);
     return i::Handle<i::FunctionTemplateInfo>(info, isolate);
   }
@@ -1457,20 +1535,12 @@
                       signature, i::FLAG_disable_old_api_accessors);
 }
 
-
 template <typename Getter, typename Setter, typename Query, typename Deleter,
           typename Enumerator>
-static void ObjectTemplateSetNamedPropertyHandler(ObjectTemplate* templ,
-                                                  Getter getter, Setter setter,
-                                                  Query query, Deleter remover,
-                                                  Enumerator enumerator,
-                                                  Local<Value> data,
-                                                  PropertyHandlerFlags flags) {
-  i::Isolate* isolate = Utils::OpenHandle(templ)->GetIsolate();
-  ENTER_V8(isolate);
-  i::HandleScope scope(isolate);
-  auto cons = EnsureConstructor(isolate, templ);
-  EnsureNotInstantiated(cons, "ObjectTemplateSetNamedPropertyHandler");
+static i::Handle<i::InterceptorInfo> CreateInterceptorInfo(
+    i::Isolate* isolate, Getter getter, Setter setter, Query query,
+    Deleter remover, Enumerator enumerator, Local<Value> data,
+    PropertyHandlerFlags flags) {
   auto obj = i::Handle<i::InterceptorInfo>::cast(
       isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE));
   obj->set_flags(0);
@@ -1492,6 +1562,24 @@
     data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
   }
   obj->set_data(*Utils::OpenHandle(*data));
+  return obj;
+}
+
+template <typename Getter, typename Setter, typename Query, typename Deleter,
+          typename Enumerator>
+static void ObjectTemplateSetNamedPropertyHandler(ObjectTemplate* templ,
+                                                  Getter getter, Setter setter,
+                                                  Query query, Deleter remover,
+                                                  Enumerator enumerator,
+                                                  Local<Value> data,
+                                                  PropertyHandlerFlags flags) {
+  i::Isolate* isolate = Utils::OpenHandle(templ)->GetIsolate();
+  ENTER_V8(isolate);
+  i::HandleScope scope(isolate);
+  auto cons = EnsureConstructor(isolate, templ);
+  EnsureNotInstantiated(cons, "ObjectTemplateSetNamedPropertyHandler");
+  auto obj = CreateInterceptorInfo(isolate, getter, setter, query, remover,
+                                   enumerator, data, flags);
   cons->set_named_property_handler(*obj);
 }
 
@@ -1538,8 +1626,8 @@
       i::Handle<i::AccessCheckInfo>::cast(struct_info);
 
   SET_FIELD_WRAPPED(info, set_callback, callback);
-  SET_FIELD_WRAPPED(info, set_named_callback, nullptr);
-  SET_FIELD_WRAPPED(info, set_indexed_callback, nullptr);
+  info->set_named_interceptor(nullptr);
+  info->set_indexed_interceptor(nullptr);
 
   if (data.IsEmpty()) {
     data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
@@ -1550,28 +1638,34 @@
   cons->set_needs_access_check(true);
 }
 
-void ObjectTemplate::SetAccessCheckCallback(
-    DeprecatedAccessCheckCallback callback, Local<Value> data) {
-  SetAccessCheckCallback(reinterpret_cast<AccessCheckCallback>(callback), data);
-}
-
-void ObjectTemplate::SetAccessCheckCallbacks(
-    NamedSecurityCallback named_callback,
-    IndexedSecurityCallback indexed_callback, Local<Value> data) {
+void ObjectTemplate::SetAccessCheckCallbackAndHandler(
+    AccessCheckCallback callback,
+    const NamedPropertyHandlerConfiguration& named_handler,
+    const IndexedPropertyHandlerConfiguration& indexed_handler,
+    Local<Value> data) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   auto cons = EnsureConstructor(isolate, this);
-  EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetAccessCheckCallbacks");
+  EnsureNotInstantiated(
+      cons, "v8::ObjectTemplate::SetAccessCheckCallbackWithHandler");
 
   i::Handle<i::Struct> struct_info =
       isolate->factory()->NewStruct(i::ACCESS_CHECK_INFO_TYPE);
   i::Handle<i::AccessCheckInfo> info =
       i::Handle<i::AccessCheckInfo>::cast(struct_info);
 
-  SET_FIELD_WRAPPED(info, set_callback, nullptr);
-  SET_FIELD_WRAPPED(info, set_named_callback, named_callback);
-  SET_FIELD_WRAPPED(info, set_indexed_callback, indexed_callback);
+  SET_FIELD_WRAPPED(info, set_callback, callback);
+  auto named_interceptor = CreateInterceptorInfo(
+      isolate, named_handler.getter, named_handler.setter, named_handler.query,
+      named_handler.deleter, named_handler.enumerator, named_handler.data,
+      named_handler.flags);
+  info->set_named_interceptor(*named_interceptor);
+  auto indexed_interceptor = CreateInterceptorInfo(
+      isolate, indexed_handler.getter, indexed_handler.setter,
+      indexed_handler.query, indexed_handler.deleter,
+      indexed_handler.enumerator, indexed_handler.data, indexed_handler.flags);
+  info->set_indexed_interceptor(*indexed_interceptor);
 
   if (data.IsEmpty()) {
     data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
@@ -1582,7 +1676,6 @@
   cons->set_needs_access_check(true);
 }
 
-
 void ObjectTemplate::SetHandler(
     const IndexedPropertyHandlerConfiguration& config) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
@@ -1590,25 +1683,9 @@
   i::HandleScope scope(isolate);
   auto cons = EnsureConstructor(isolate, this);
   EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetHandler");
-  auto obj = i::Handle<i::InterceptorInfo>::cast(
-      isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE));
-  obj->set_flags(0);
-
-  if (config.getter != 0) SET_FIELD_WRAPPED(obj, set_getter, config.getter);
-  if (config.setter != 0) SET_FIELD_WRAPPED(obj, set_setter, config.setter);
-  if (config.query != 0) SET_FIELD_WRAPPED(obj, set_query, config.query);
-  if (config.deleter != 0) SET_FIELD_WRAPPED(obj, set_deleter, config.deleter);
-  if (config.enumerator != 0) {
-    SET_FIELD_WRAPPED(obj, set_enumerator, config.enumerator);
-  }
-  obj->set_all_can_read(static_cast<int>(config.flags) &
-                        static_cast<int>(PropertyHandlerFlags::kAllCanRead));
-
-  v8::Local<v8::Value> data = config.data;
-  if (data.IsEmpty()) {
-    data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
-  }
-  obj->set_data(*Utils::OpenHandle(*data));
+  auto obj = CreateInterceptorInfo(
+      isolate, config.getter, config.setter, config.query, config.deleter,
+      config.enumerator, config.data, config.flags);
   cons->set_indexed_property_handler(*obj);
 }
 
@@ -2257,7 +2334,7 @@
 
 
 bool v8::TryCatch::HasCaught() const {
-  return !reinterpret_cast<i::Object*>(exception_)->IsTheHole();
+  return !reinterpret_cast<i::Object*>(exception_)->IsTheHole(isolate_);
 }
 
 
@@ -2316,8 +2393,8 @@
 
 v8::Local<v8::Message> v8::TryCatch::Message() const {
   i::Object* message = reinterpret_cast<i::Object*>(message_obj_);
-  DCHECK(message->IsJSMessageObject() || message->IsTheHole());
-  if (HasCaught() && !message->IsTheHole()) {
+  DCHECK(message->IsJSMessageObject() || message->IsTheHole(isolate_));
+  if (HasCaught() && !message->IsTheHole(isolate_)) {
     return v8::Utils::MessageToLocal(i::Handle<i::Object>(message, isolate_));
   } else {
     return v8::Local<v8::Message>();
@@ -2626,7 +2703,7 @@
   i::Handle<i::JSObject> self = Utils::OpenHandle(f);
   i::Handle<i::Object> obj =
       i::JSReceiver::GetProperty(isolate, self, propertyName).ToHandleChecked();
-  return obj->IsTrue();
+  return obj->IsTrue(isolate);
 }
 
 bool StackFrame::IsEval() const { return getBoolProperty(this, "isEval"); }
@@ -2661,7 +2738,7 @@
   }
   i::Handle<i::ObjectHashTable> table(
       i::ObjectHashTable::cast(weak_collection->table()));
-  if (!table->IsKey(*key)) {
+  if (!table->IsKey(isolate, *key)) {
     DCHECK(false);
     return;
   }
@@ -2681,12 +2758,12 @@
   }
   i::Handle<i::ObjectHashTable> table(
       i::ObjectHashTable::cast(weak_collection->table()));
-  if (!table->IsKey(*key)) {
+  if (!table->IsKey(isolate, *key)) {
     DCHECK(false);
     return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
   }
   i::Handle<i::Object> lookup(table->Lookup(key), isolate);
-  if (lookup->IsTheHole())
+  if (lookup->IsTheHole(isolate))
     return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
   return Utils::ToLocal(lookup);
 }
@@ -2704,12 +2781,12 @@
   }
   i::Handle<i::ObjectHashTable> table(
       i::ObjectHashTable::cast(weak_collection->table()));
-  if (!table->IsKey(*key)) {
+  if (!table->IsKey(isolate, *key)) {
     DCHECK(false);
     return false;
   }
   i::Handle<i::Object> lookup(table->Lookup(key), isolate);
-  return !lookup->IsTheHole();
+  return !lookup->IsTheHole(isolate);
 }
 
 
@@ -2725,7 +2802,7 @@
   }
   i::Handle<i::ObjectHashTable> table(
       i::ObjectHashTable::cast(weak_collection->table()));
-  if (!table->IsKey(*key)) {
+  if (!table->IsKey(isolate, *key)) {
     DCHECK(false);
     return false;
   }
@@ -2741,9 +2818,10 @@
   PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, JSON, Parse, Value);
   i::Handle<i::String> string = Utils::OpenHandle(*json_string);
   i::Handle<i::String> source = i::String::Flatten(string);
+  i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
   auto maybe = source->IsSeqOneByteString()
-                   ? i::JsonParser<true>::Parse(source)
-                   : i::JsonParser<false>::Parse(source);
+                   ? i::JsonParser<true>::Parse(isolate, source, undefined)
+                   : i::JsonParser<false>::Parse(isolate, source, undefined);
   Local<Value> result;
   has_pending_exception = !ToLocal<Value>(maybe, &result);
   RETURN_ON_FAILED_EXECUTION(Value);
@@ -2755,9 +2833,10 @@
   PREPARE_FOR_EXECUTION(context, JSON, Parse, Value);
   i::Handle<i::String> string = Utils::OpenHandle(*json_string);
   i::Handle<i::String> source = i::String::Flatten(string);
+  i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
   auto maybe = source->IsSeqOneByteString()
-                   ? i::JsonParser<true>::Parse(source)
-                   : i::JsonParser<false>::Parse(source);
+                   ? i::JsonParser<true>::Parse(isolate, source, undefined)
+                   : i::JsonParser<false>::Parse(isolate, source, undefined);
   Local<Value> result;
   has_pending_exception = !ToLocal<Value>(maybe, &result);
   RETURN_ON_FAILED_EXECUTION(Value);
@@ -2769,12 +2848,18 @@
 }
 
 MaybeLocal<String> JSON::Stringify(Local<Context> context,
-                                   Local<Object> json_object) {
+                                   Local<Object> json_object,
+                                   Local<String> gap) {
   PREPARE_FOR_EXECUTION(context, JSON, Stringify, String);
   i::Handle<i::Object> object = Utils::OpenHandle(*json_object);
+  i::Handle<i::Object> replacer = isolate->factory()->undefined_value();
+  i::Handle<i::String> gap_string = gap.IsEmpty()
+                                        ? isolate->factory()->empty_string()
+                                        : Utils::OpenHandle(*gap);
   i::Handle<i::Object> maybe;
-  has_pending_exception =
-      !i::Runtime::BasicJsonStringify(isolate, object).ToHandle(&maybe);
+  has_pending_exception = !i::JsonStringifier(isolate)
+                               .Stringify(object, replacer, gap_string)
+                               .ToHandle(&maybe);
   RETURN_ON_FAILED_EXECUTION(String);
   Local<String> result;
   has_pending_exception =
@@ -2786,26 +2871,38 @@
 // --- D a t a ---
 
 bool Value::FullIsUndefined() const {
-  bool result = Utils::OpenHandle(this)->IsUndefined();
+  i::Handle<i::Object> object = Utils::OpenHandle(this);
+  bool result = false;
+  if (!object->IsSmi()) {
+    result = object->IsUndefined(i::HeapObject::cast(*object)->GetIsolate());
+  }
   DCHECK_EQ(result, QuickIsUndefined());
   return result;
 }
 
 
 bool Value::FullIsNull() const {
-  bool result = Utils::OpenHandle(this)->IsNull();
+  i::Handle<i::Object> object = Utils::OpenHandle(this);
+  bool result = false;
+  if (!object->IsSmi()) {
+    result = object->IsNull(i::HeapObject::cast(*object)->GetIsolate());
+  }
   DCHECK_EQ(result, QuickIsNull());
   return result;
 }
 
 
 bool Value::IsTrue() const {
-  return Utils::OpenHandle(this)->IsTrue();
+  i::Handle<i::Object> object = Utils::OpenHandle(this);
+  if (object->IsSmi()) return false;
+  return object->IsTrue(i::HeapObject::cast(*object)->GetIsolate());
 }
 
 
 bool Value::IsFalse() const {
-  return Utils::OpenHandle(this)->IsFalse();
+  i::Handle<i::Object> object = Utils::OpenHandle(this);
+  if (object->IsSmi()) return false;
+  return object->IsFalse(i::HeapObject::cast(*object)->GetIsolate());
 }
 
 
@@ -2942,22 +3039,7 @@
 
 
 bool Value::IsNativeError() const {
-  i::Handle<i::Object> obj = Utils::OpenHandle(this);
-  if (!obj->IsJSObject()) return false;
-  i::Handle<i::JSObject> js_obj = i::Handle<i::JSObject>::cast(obj);
-  i::Isolate* isolate = js_obj->GetIsolate();
-  i::Handle<i::Object> constructor(js_obj->map()->GetConstructor(), isolate);
-  if (!constructor->IsJSFunction()) return false;
-  i::Handle<i::JSFunction> function =
-      i::Handle<i::JSFunction>::cast(constructor);
-  if (!function->shared()->native()) return false;
-  return function.is_identical_to(isolate->error_function()) ||
-         function.is_identical_to(isolate->eval_error_function()) ||
-         function.is_identical_to(isolate->range_error_function()) ||
-         function.is_identical_to(isolate->reference_error_function()) ||
-         function.is_identical_to(isolate->syntax_error_function()) ||
-         function.is_identical_to(isolate->type_error_function()) ||
-         function.is_identical_to(isolate->uri_error_function());
+  return Utils::OpenHandle(this)->IsJSError();
 }
 
 
@@ -2989,12 +3071,7 @@
   return Utils::OpenHandle(this)->IsJSSetIterator();
 }
 
-
-bool Value::IsPromise() const {
-  auto self = Utils::OpenHandle(this);
-  return i::Object::IsPromise(self);
-}
-
+bool Value::IsPromise() const { return Utils::OpenHandle(this)->IsJSPromise(); }
 
 MaybeLocal<String> Value::ToString(Local<Context> context) const {
   auto obj = Utils::OpenHandle(this);
@@ -3848,27 +3925,38 @@
     v8::Local<FunctionTemplate> tmpl) {
   auto isolate = Utils::OpenHandle(this)->GetIsolate();
   i::PrototypeIterator iter(isolate, *Utils::OpenHandle(this),
-                            i::PrototypeIterator::START_AT_RECEIVER);
+                            i::kStartAtReceiver);
   auto tmpl_info = *Utils::OpenHandle(*tmpl);
-  while (!tmpl_info->IsTemplateFor(iter.GetCurrent())) {
+  while (!tmpl_info->IsTemplateFor(iter.GetCurrent<i::JSObject>())) {
     iter.Advance();
-    if (iter.IsAtEnd()) {
-      return Local<Object>();
-    }
+    if (iter.IsAtEnd()) return Local<Object>();
+    if (!iter.GetCurrent()->IsJSObject()) return Local<Object>();
   }
   // IsTemplateFor() ensures that iter.GetCurrent() can't be a Proxy here.
   return Utils::ToLocal(i::handle(iter.GetCurrent<i::JSObject>(), isolate));
 }
 
-
 MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context) {
+  return GetPropertyNames(
+      context, v8::KeyCollectionMode::kIncludePrototypes,
+      static_cast<v8::PropertyFilter>(ONLY_ENUMERABLE | SKIP_SYMBOLS),
+      v8::IndexFilter::kIncludeIndices);
+}
+
+MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context,
+                                               KeyCollectionMode mode,
+                                               PropertyFilter property_filter,
+                                               IndexFilter index_filter) {
   PREPARE_FOR_EXECUTION(context, Object, GetPropertyNames, Array);
   auto self = Utils::OpenHandle(this);
   i::Handle<i::FixedArray> value;
-  has_pending_exception =
-      !i::JSReceiver::GetKeys(self, i::INCLUDE_PROTOS, i::ENUMERABLE_STRINGS)
-           .ToHandle(&value);
+  i::KeyAccumulator accumulator(
+      isolate, static_cast<i::KeyCollectionMode>(mode),
+      static_cast<i::PropertyFilter>(property_filter));
+  accumulator.set_skip_indices(index_filter == IndexFilter::kSkipIndices);
+  has_pending_exception = accumulator.CollectKeys(self, self).IsNothing();
   RETURN_ON_FAILED_EXECUTION(Array);
+  value = accumulator.GetKeys(i::GetKeysConversion::kKeepNumbers);
   DCHECK(self->map()->EnumLength() == i::kInvalidEnumCacheSentinel ||
          self->map()->EnumLength() == 0 ||
          self->map()->instance_descriptors()->GetEnumCache() != *value);
@@ -3894,19 +3982,8 @@
 
 MaybeLocal<Array> v8::Object::GetOwnPropertyNames(Local<Context> context,
                                                   PropertyFilter filter) {
-  PREPARE_FOR_EXECUTION(context, Object, GetOwnPropertyNames, Array);
-  auto self = Utils::OpenHandle(this);
-  i::Handle<i::FixedArray> value;
-  has_pending_exception =
-      !i::JSReceiver::GetKeys(self, i::OWN_ONLY,
-                              static_cast<i::PropertyFilter>(filter))
-           .ToHandle(&value);
-  RETURN_ON_FAILED_EXECUTION(Array);
-  DCHECK(self->map()->EnumLength() == i::kInvalidEnumCacheSentinel ||
-         self->map()->EnumLength() == 0 ||
-         self->map()->instance_descriptors()->GetEnumCache() != *value);
-  auto result = isolate->factory()->NewJSArrayWithElements(value);
-  RETURN_ESCAPED(Utils::ToLocal(result));
+  return GetPropertyNames(context, KeyCollectionMode::kOwnOnly, filter,
+                          v8::IndexFilter::kIncludeIndices);
 }
 
 MaybeLocal<String> v8::Object::ObjectProtoToString(Local<Context> context) {
@@ -4053,7 +4130,7 @@
   has_pending_exception =
       !i::JSObject::SetAccessor(obj, info).ToHandle(&result);
   RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
-  if (result->IsUndefined()) return Nothing<bool>();
+  if (result->IsUndefined(obj->GetIsolate())) return Nothing<bool>();
   if (fast) {
     i::JSObject::MigrateSlowToFast(obj, 0, "APISetAccessor");
   }
@@ -4343,7 +4420,7 @@
   auto isolate = Utils::OpenHandle(this)->GetIsolate();
   i::HandleScope scope(isolate);
   auto self = Utils::OpenHandle(this);
-  return i::JSReceiver::GetOrCreateIdentityHash(self)->value();
+  return i::JSReceiver::GetOrCreateIdentityHash(isolate, self)->value();
 }
 
 
@@ -5352,7 +5429,7 @@
 
 bool Boolean::Value() const {
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
-  return obj->IsTrue();
+  return obj->IsTrue(i::HeapObject::cast(*obj)->GetIsolate());
 }
 
 
@@ -5443,7 +5520,10 @@
 
 static void* ExternalValue(i::Object* obj) {
   // Obscure semantics for undefined, but somehow checked in our unit tests...
-  if (obj->IsUndefined()) return NULL;
+  if (!obj->IsSmi() &&
+      obj->IsUndefined(i::HeapObject::cast(obj)->GetIsolate())) {
+    return NULL;
+  }
   i::Object* foreign = i::JSObject::cast(obj)->GetInternalField(0);
   return i::Foreign::cast(foreign)->foreign_address();
 }
@@ -5513,11 +5593,17 @@
       object_count_(0),
       object_size_(0) {}
 
+HeapCodeStatistics::HeapCodeStatistics()
+    : code_and_metadata_size_(0), bytecode_and_metadata_size_(0) {}
 
 bool v8::V8::InitializeICU(const char* icu_data_file) {
   return i::InitializeICU(icu_data_file);
 }
 
+bool v8::V8::InitializeICUDefaultLocation(const char* exec_path,
+                                          const char* icu_data_file) {
+  return i::InitializeICUDefaultLocation(exec_path, icu_data_file);
+}
 
 void v8::V8::InitializeExternalStartupData(const char* directory_path) {
   i::InitializeExternalStartupData(directory_path);
@@ -5534,11 +5620,10 @@
   return i::Version::GetVersion();
 }
 
-
 static i::Handle<i::Context> CreateEnvironment(
     i::Isolate* isolate, v8::ExtensionConfiguration* extensions,
     v8::Local<ObjectTemplate> global_template,
-    v8::Local<Value> maybe_global_proxy) {
+    v8::Local<Value> maybe_global_proxy, size_t context_snapshot_index) {
   i::Handle<i::Context> env;
 
   // Enter V8 via an ENTER_V8 scope.
@@ -5565,7 +5650,7 @@
       // Migrate security handlers from global_template to
       // proxy_template.  Temporarily removing access check
       // information from the global template.
-      if (!global_constructor->access_check_info()->IsUndefined()) {
+      if (!global_constructor->access_check_info()->IsUndefined(isolate)) {
         proxy_constructor->set_access_check_info(
             global_constructor->access_check_info());
         proxy_constructor->set_needs_access_check(
@@ -5583,7 +5668,7 @@
     }
     // Create the environment.
     env = isolate->bootstrapper()->CreateEnvironment(
-        maybe_proxy, proxy_template, extensions);
+        maybe_proxy, proxy_template, extensions, context_snapshot_index);
 
     // Restore the access check info on the global template.
     if (!global_template.IsEmpty()) {
@@ -5603,14 +5688,16 @@
 Local<Context> v8::Context::New(v8::Isolate* external_isolate,
                                 v8::ExtensionConfiguration* extensions,
                                 v8::Local<ObjectTemplate> global_template,
-                                v8::Local<Value> global_object) {
+                                v8::Local<Value> global_object,
+                                size_t context_snapshot_index) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
   LOG_API(isolate, Context, New);
   i::HandleScope scope(isolate);
   ExtensionConfiguration no_extensions;
   if (extensions == NULL) extensions = &no_extensions;
   i::Handle<i::Context> env =
-      CreateEnvironment(isolate, extensions, global_template, global_object);
+      CreateEnvironment(isolate, extensions, global_template, global_object,
+                        context_snapshot_index);
   if (env.is_null()) {
     if (isolate->has_pending_exception()) {
       isolate->OptionalRescheduleException(true);
@@ -5690,7 +5777,8 @@
 
 bool Context::IsCodeGenerationFromStringsAllowed() {
   i::Handle<i::Context> context = Utils::OpenHandle(this);
-  return !context->allow_code_gen_from_strings()->IsFalse();
+  return !context->allow_code_gen_from_strings()->IsFalse(
+      context->GetIsolate());
 }
 
 
@@ -5744,7 +5832,7 @@
 bool FunctionTemplate::HasInstance(v8::Local<v8::Value> value) {
   auto self = Utils::OpenHandle(this);
   auto obj = Utils::OpenHandle(*value);
-  return self->IsTemplateFor(*obj);
+  return obj->IsJSObject() && self->IsTemplateFor(i::JSObject::cast(*obj));
 }
 
 
@@ -6007,14 +6095,11 @@
 
 bool v8::String::CanMakeExternal() {
   i::Handle<i::String> obj = Utils::OpenHandle(this);
-  i::Isolate* isolate = obj->GetIsolate();
+  if (obj->IsExternalString()) return false;
 
   // Old space strings should be externalized.
-  if (!isolate->heap()->new_space()->Contains(*obj)) return true;
-  int size = obj->Size();  // Byte size of the original string.
-  if (size <= i::ExternalString::kShortSize) return false;
-  i::StringShape shape(*obj);
-  return !shape.IsExternal();
+  i::Isolate* isolate = obj->GetIsolate();
+  return !isolate->heap()->new_space()->Contains(*obj);
 }
 
 
@@ -6077,7 +6162,7 @@
   i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
   i::Isolate* isolate = jsvalue->GetIsolate();
   LOG_API(isolate, BooleanObject, BooleanValue);
-  return jsvalue->value()->IsTrue();
+  return jsvalue->value()->IsTrue(isolate);
 }
 
 
@@ -6324,7 +6409,7 @@
                                               arraysize(argv), argv)
                                .ToHandle(&result);
   RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
-  return Just(result->IsTrue());
+  return Just(result->IsTrue(isolate));
 }
 
 
@@ -6337,7 +6422,7 @@
                                               self, arraysize(argv), argv)
                                .ToHandle(&result);
   RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
-  return Just(result->IsTrue());
+  return Just(result->IsTrue(isolate));
 }
 
 
@@ -6416,7 +6501,7 @@
                                               arraysize(argv), argv)
                                .ToHandle(&result);
   RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
-  return Just(result->IsTrue());
+  return Just(result->IsTrue(isolate));
 }
 
 
@@ -6429,7 +6514,7 @@
                                               self, arraysize(argv), argv)
                                .ToHandle(&result);
   RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
-  return Just(result->IsTrue());
+  return Just(result->IsTrue(isolate));
 }
 
 
@@ -6603,7 +6688,7 @@
   LOG_API(isolate, Promise, HasRejectHandler);
   ENTER_V8(isolate);
   i::Handle<i::Symbol> key = isolate->factory()->promise_has_handler_symbol();
-  return i::JSReceiver::GetDataProperty(promise, key)->IsTrue();
+  return i::JSReceiver::GetDataProperty(promise, key)->IsTrue(isolate);
 }
 
 
@@ -6939,7 +7024,7 @@
   i::Handle<i::Object> symbol =
       i::Object::GetPropertyOrElement(symbols, name).ToHandleChecked();
   if (!symbol->IsSymbol()) {
-    DCHECK(symbol->IsUndefined());
+    DCHECK(symbol->IsUndefined(isolate));
     if (private_symbol)
       symbol = isolate->factory()->NewPrivateSymbol();
     else
@@ -7186,22 +7271,6 @@
   isolate->heap()->SetEmbedderHeapTracer(tracer);
 }
 
-void Isolate::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
-                                          ObjectSpace space,
-                                          AllocationAction action) {
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
-  isolate->heap()->memory_allocator()->AddMemoryAllocationCallback(
-      callback, space, action);
-}
-
-
-void Isolate::RemoveMemoryAllocationCallback(
-    MemoryAllocationCallback callback) {
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
-  isolate->heap()->memory_allocator()->RemoveMemoryAllocationCallback(callback);
-}
-
-
 void Isolate::TerminateExecution() {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
   isolate->stack_guard()->RequestTerminateExecution();
@@ -7284,18 +7353,12 @@
     v8_isolate->SetAddHistogramSampleFunction(
         params.add_histogram_sample_callback);
   }
+
+  isolate->set_api_external_references(params.external_references);
   SetResourceConstraints(isolate, params.constraints);
   // TODO(jochen): Once we got rid of Isolate::Current(), we can remove this.
   Isolate::Scope isolate_scope(v8_isolate);
   if (params.entry_hook || !i::Snapshot::Initialize(isolate)) {
-    // If the isolate has a function entry hook, it needs to re-build all its
-    // code stubs with entry hooks embedded, so don't deserialize a snapshot.
-    if (i::Snapshot::EmbedsScript(isolate)) {
-      // If the snapshot embeds a script, we cannot initialize the isolate
-      // without the snapshot as a fallback. This is unlikely to happen though.
-      V8_Fatal(__FILE__, __LINE__,
-               "Initializing isolate from custom startup snapshot failed");
-    }
     isolate->Init(NULL);
   }
   return v8_isolate;
@@ -7467,6 +7530,18 @@
   return true;
 }
 
+bool Isolate::GetHeapCodeAndMetadataStatistics(
+    HeapCodeStatistics* code_statistics) {
+  if (!code_statistics) return false;
+
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->heap()->CollectCodeStatistics();
+
+  code_statistics->code_and_metadata_size_ = isolate->code_and_metadata_size();
+  code_statistics->bytecode_and_metadata_size_ =
+      isolate->bytecode_and_metadata_size();
+  return true;
+}
 
 void Isolate::GetStackSample(const RegisterState& state, void** frames,
                              size_t frames_limit, SampleInfo* sample_info) {
@@ -7690,6 +7765,11 @@
                                                      Locker::IsLocked(this));
 }
 
+void Isolate::SetRAILMode(RAILMode rail_mode) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  return isolate->SetRAILMode(rail_mode);
+}
+
 void Isolate::SetJitCodeEventHandler(JitCodeEventOptions options,
                                      JitCodeEventHandler event_handler) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -7758,7 +7838,7 @@
   i::HandleScope scope(isolate);
   NeanderArray listeners(isolate->factory()->message_listeners());
   for (int i = 0; i < listeners.length(); i++) {
-    if (listeners.get(i)->IsUndefined()) continue;  // skip deleted ones
+    if (listeners.get(i)->IsUndefined(isolate)) continue;  // skip deleted ones
 
     NeanderObject listener(i::JSObject::cast(listeners.get(i)));
     i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listener.get(0)));
@@ -7790,6 +7870,12 @@
 }
 
 
+bool Isolate::IsInUse() {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  return isolate->IsInUse();
+}
+
+
 class VisitorAdapter : public i::ObjectVisitor {
  public:
   explicit VisitorAdapter(PersistentHandleVisitor* visitor)
@@ -8114,6 +8200,14 @@
   return GetDebugContext(reinterpret_cast<Isolate*>(i::Isolate::Current()));
 }
 
+MaybeLocal<Context> Debug::GetDebuggedContext(Isolate* isolate) {
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  ENTER_V8(i_isolate);
+  if (!i_isolate->debug()->in_debug_scope()) return MaybeLocal<Context>();
+  i::Handle<i::Object> calling = i_isolate->GetCallingNativeContext();
+  if (calling.is_null()) return MaybeLocal<Context>();
+  return Utils::ToLocal(i::Handle<i::Context>::cast(calling));
+}
 
 void Debug::SetLiveEditEnabled(Isolate* isolate, bool enable) {
   i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -8240,9 +8334,8 @@
 
 void CpuProfile::Delete() {
   i::CpuProfile* profile = reinterpret_cast<i::CpuProfile*>(this);
-  i::Isolate* isolate = profile->top_down()->isolate();
-  i::CpuProfiler* profiler = isolate->cpu_profiler();
-  DCHECK(profiler != NULL);
+  i::CpuProfiler* profiler = profile->cpu_profiler();
+  DCHECK(profiler != nullptr);
   profiler->DeleteProfile(profile);
 }
 
@@ -8316,8 +8409,8 @@
 
 void CpuProfiler::SetIdle(bool is_idle) {
   i::CpuProfiler* profiler = reinterpret_cast<i::CpuProfiler*>(this);
-  if (!profiler->is_profiling()) return;
   i::Isolate* isolate = profiler->isolate();
+  if (!isolate->is_profiling()) return;
   v8::StateTag state = isolate->current_vm_state();
   DCHECK(state == v8::EXTERNAL || state == v8::IDLE);
   if (isolate->js_entry_sp() != NULL) return;
diff --git a/src/api.h b/src/api.h
index cb2b5c3..a6f403d 100644
--- a/src/api.h
+++ b/src/api.h
@@ -281,7 +281,9 @@
 
   template<class From, class To>
   static inline Local<To> Convert(v8::internal::Handle<From> obj) {
-    DCHECK(obj.is_null() || !obj->IsTheHole());
+    DCHECK(obj.is_null() ||
+           (obj->IsSmi() ||
+            !obj->IsTheHole(i::HeapObject::cast(*obj)->GetIsolate())));
     return Local<To>(reinterpret_cast<To*>(obj.location()));
   }
 
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 1ccc3a6..9633a63 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -57,7 +57,7 @@
     answer |= 1u << ARMv8;
     // ARMv8 always features VFP and NEON.
     answer |= 1u << ARMv7 | 1u << VFP3 | 1u << NEON | 1u << VFP32DREGS;
-    answer |= 1u << SUDIV | 1u << MLS;
+    answer |= 1u << SUDIV;
   }
 #endif  // CAN_USE_ARMV8_INSTRUCTIONS
 #ifdef CAN_USE_ARMV7_INSTRUCTIONS
@@ -93,7 +93,7 @@
     supported_ |= 1u << ARMv8;
     // ARMv8 always features VFP and NEON.
     supported_ |= 1u << ARMv7 | 1u << VFP3 | 1u << NEON | 1u << VFP32DREGS;
-    supported_ |= 1u << SUDIV | 1u << MLS;
+    supported_ |= 1u << SUDIV;
     if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
   }
   if (FLAG_enable_armv7) {
@@ -104,7 +104,6 @@
     if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
     if (FLAG_enable_32dregs) supported_ |= 1u << VFP32DREGS;
   }
-  if (FLAG_enable_mls) supported_ |= 1u << MLS;
   if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
 
 #else  // __arm__
@@ -119,7 +118,6 @@
 
   if (FLAG_enable_neon && cpu.has_neon()) supported_ |= 1u << NEON;
   if (FLAG_enable_sudiv && cpu.has_idiva()) supported_ |= 1u << SUDIV;
-  if (FLAG_enable_mls && cpu.has_thumb2()) supported_ |= 1u << MLS;
 
   if (cpu.architecture() >= 7) {
     if (FLAG_enable_armv7) supported_ |= 1u << ARMv7;
@@ -203,12 +201,11 @@
 
 void CpuFeatures::PrintFeatures() {
   printf(
-      "ARMv8=%d ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d MLS=%d"
+      "ARMv8=%d ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d "
       "UNALIGNED_ACCESSES=%d MOVW_MOVT_IMMEDIATE_LOADS=%d",
       CpuFeatures::IsSupported(ARMv8), CpuFeatures::IsSupported(ARMv7),
       CpuFeatures::IsSupported(VFP3), CpuFeatures::IsSupported(VFP32DREGS),
       CpuFeatures::IsSupported(NEON), CpuFeatures::IsSupported(SUDIV),
-      CpuFeatures::IsSupported(MLS),
       CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
       CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
 #ifdef __arm__
@@ -252,31 +249,20 @@
   return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
 }
 
-void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
-    ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
-  if (IsWasmMemoryReference(rmode_)) {
-    Address updated_memory_reference;
-    DCHECK(old_base <= wasm_memory_reference() &&
-           wasm_memory_reference() < old_base + old_size);
-    updated_memory_reference = new_base + (wasm_memory_reference() - old_base);
-    DCHECK(new_base <= updated_memory_reference &&
-           updated_memory_reference < new_base + new_size);
-    Assembler::set_target_address_at(
-        isolate_, pc_, host_, updated_memory_reference, icache_flush_mode);
-  } else if (IsWasmMemorySizeReference(rmode_)) {
-    uint32_t updated_size_reference;
-    DCHECK(wasm_memory_size_reference() <= old_size);
-    updated_size_reference =
-        new_size + (wasm_memory_size_reference() - old_size);
-    DCHECK(updated_size_reference <= new_size);
-    Assembler::set_target_address_at(
-        isolate_, pc_, host_, reinterpret_cast<Address>(updated_size_reference),
-        icache_flush_mode);
-  } else {
-    UNREACHABLE();
-  }
+Address RelocInfo::wasm_global_reference() {
+  DCHECK(IsWasmGlobalReference(rmode_));
+  return Assembler::target_address_at(pc_, host_);
+}
+
+void RelocInfo::unchecked_update_wasm_memory_reference(
+    Address address, ICacheFlushMode flush_mode) {
+  Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
+}
+
+void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
+                                                  ICacheFlushMode flush_mode) {
+  Assembler::set_target_address_at(isolate_, pc_, host_,
+                                   reinterpret_cast<Address>(size), flush_mode);
 }
 
 // -----------------------------------------------------------------------------
@@ -486,17 +472,16 @@
     al | B26 | NegOffset | Register::kCode_fp * B16;
 const Instr kLdrStrInstrTypeMask = 0xffff0000;
 
-
 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
     : AssemblerBase(isolate, buffer, buffer_size),
       recorded_ast_id_(TypeFeedbackId::None()),
-      pending_32_bit_constants_(&pending_32_bit_constants_buffer_[0]),
-      pending_64_bit_constants_(&pending_64_bit_constants_buffer_[0]),
+      pending_32_bit_constants_(),
+      pending_64_bit_constants_(),
       constant_pool_builder_(kLdrMaxReachBits, kVldrMaxReachBits),
       positions_recorder_(this) {
+  pending_32_bit_constants_.reserve(kMinNumPendingConstants);
+  pending_64_bit_constants_.reserve(kMinNumPendingConstants);
   reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
-  num_pending_32_bit_constants_ = 0;
-  num_pending_64_bit_constants_ = 0;
   next_buffer_check_ = 0;
   const_pool_blocked_nesting_ = 0;
   no_const_pool_before_ = 0;
@@ -509,12 +494,6 @@
 
 Assembler::~Assembler() {
   DCHECK(const_pool_blocked_nesting_ == 0);
-  if (pending_32_bit_constants_ != &pending_32_bit_constants_buffer_[0]) {
-    delete[] pending_32_bit_constants_;
-  }
-  if (pending_64_bit_constants_ != &pending_64_bit_constants_buffer_[0]) {
-    delete[] pending_64_bit_constants_;
-  }
 }
 
 
@@ -527,8 +506,8 @@
     constant_pool_offset = EmitEmbeddedConstantPool();
   } else {
     CheckConstPool(true, false);
-    DCHECK(num_pending_32_bit_constants_ == 0);
-    DCHECK(num_pending_64_bit_constants_ == 0);
+    DCHECK(pending_32_bit_constants_.empty());
+    DCHECK(pending_64_bit_constants_.empty());
   }
   // Set up code descriptor.
   desc->buffer = buffer_;
@@ -538,6 +517,8 @@
   desc->constant_pool_size =
       (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
   desc->origin = this;
+  desc->unwinding_info_size = 0;
+  desc->unwinding_info = nullptr;
 }
 
 
@@ -851,6 +832,19 @@
     // Load the position of the label relative to the generated code object
     // pointer in a register.
 
+    // The existing code must be a single 24-bit label chain link, followed by
+    // nops encoding the destination register. See mov_label_offset.
+
+    // Extract the destination register from the first nop instructions.
+    Register dst =
+        Register::from_code(Instruction::RmValue(instr_at(pos + kInstrSize)));
+    // In addition to the 24-bit label chain link, we expect to find one nop for
+    // ARMv7 and above, or two nops for ARMv6. See mov_label_offset.
+    DCHECK(IsNop(instr_at(pos + kInstrSize), dst.code()));
+    if (!CpuFeatures::IsSupported(ARMv7)) {
+      DCHECK(IsNop(instr_at(pos + 2 * kInstrSize), dst.code()));
+    }
+
     // Here are the instructions we need to emit:
     //   For ARMv7: target24 => target16_1:target16_0
     //      movw dst, #target16_0
@@ -860,10 +854,6 @@
     //      orr dst, dst, #target8_1 << 8
     //      orr dst, dst, #target8_2 << 16
 
-    // We extract the destination register from the emitted nop instruction.
-    Register dst = Register::from_code(
-        Instruction::RmValue(instr_at(pos + kInstrSize)));
-    DCHECK(IsNop(instr_at(pos + kInstrSize), dst.code()));
     uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
     DCHECK(is_uint24(target24));
     if (is_uint8(target24)) {
@@ -1390,7 +1380,6 @@
 
 
 void Assembler::bl(int branch_offset, Condition cond) {
-  positions_recorder()->WriteRecordedPositions();
   DCHECK((branch_offset & 3) == 0);
   int imm24 = branch_offset >> 2;
   CHECK(is_int24(imm24));
@@ -1399,7 +1388,6 @@
 
 
 void Assembler::blx(int branch_offset) {  // v5 and above
-  positions_recorder()->WriteRecordedPositions();
   DCHECK((branch_offset & 1) == 0);
   int h = ((branch_offset & 2) >> 1)*B24;
   int imm24 = branch_offset >> 2;
@@ -1409,14 +1397,12 @@
 
 
 void Assembler::blx(Register target, Condition cond) {  // v5 and above
-  positions_recorder()->WriteRecordedPositions();
   DCHECK(!target.is(pc));
   emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
 }
 
 
 void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
-  positions_recorder()->WriteRecordedPositions();
   DCHECK(!target.is(pc));  // use of pc is actually allowed, but discouraged
   emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
 }
@@ -1524,9 +1510,6 @@
 
 
 void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
-  if (dst.is(pc)) {
-    positions_recorder()->WriteRecordedPositions();
-  }
   // Don't allow nop instructions in the form mov rn, rn to be generated using
   // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
   // or MarkCode(int/NopMarkerTypes) pseudo instructions.
@@ -1609,7 +1592,7 @@
 void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
                     Condition cond) {
   DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
-  DCHECK(IsEnabled(MLS));
+  DCHECK(IsEnabled(ARMv7));
   emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
        src2.code()*B8 | B7 | B4 | src1.code());
 }
@@ -2015,9 +1998,6 @@
 
 // Load/Store instructions.
 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
-  if (dst.is(pc)) {
-    positions_recorder()->WriteRecordedPositions();
-  }
   addrmod2(cond | B26 | L, dst, src);
 }
 
@@ -2076,6 +2056,53 @@
   addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
 }
 
+// Load/Store exclusive instructions.
+void Assembler::ldrex(Register dst, Register src, Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.75.
+  // cond(31-28) | 00011001(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
+  emit(cond | B24 | B23 | B20 | src.code() * B16 | dst.code() * B12 | 0xf9f);
+}
+
+void Assembler::strex(Register src1, Register src2, Register dst,
+                      Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.212.
+  // cond(31-28) | 00011000(27-20) | Rn(19-16) | Rd(15-12) | 11111001(11-4) |
+  // Rt(3-0)
+  emit(cond | B24 | B23 | dst.code() * B16 | src1.code() * B12 | 0xf9 * B4 |
+       src2.code());
+}
+
+void Assembler::ldrexb(Register dst, Register src, Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.76.
+  // cond(31-28) | 00011101(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
+  emit(cond | B24 | B23 | B22 | B20 | src.code() * B16 | dst.code() * B12 |
+       0xf9f);
+}
+
+void Assembler::strexb(Register src1, Register src2, Register dst,
+                       Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.213.
+  // cond(31-28) | 00011100(27-20) | Rn(19-16) | Rd(15-12) | 11111001(11-4) |
+  // Rt(3-0)
+  emit(cond | B24 | B23 | B22 | dst.code() * B16 | src1.code() * B12 |
+       0xf9 * B4 | src2.code());
+}
+
+void Assembler::ldrexh(Register dst, Register src, Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.78.
+  // cond(31-28) | 00011111(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
+  emit(cond | B24 | B23 | B22 | B21 | B20 | src.code() * B16 |
+       dst.code() * B12 | 0xf9f);
+}
+
+void Assembler::strexh(Register src1, Register src2, Register dst,
+                       Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.215.
+  // cond(31-28) | 00011110(27-20) | Rn(19-16) | Rd(15-12) | 11111001(11-4) |
+  // Rt(3-0)
+  emit(cond | B24 | B23 | B22 | B21 | dst.code() * B16 | src1.code() * B12 |
+       0xf9 * B4 | src2.code());
+}
 
 // Preload instructions.
 void Assembler::pld(const MemOperand& address) {
@@ -3827,8 +3854,8 @@
 void Assembler::db(uint8_t data) {
   // db is used to write raw data. The constant pool should be emitted or
   // blocked before using db.
-  DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
-  DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
+  DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
+  DCHECK(is_const_pool_blocked() || pending_64_bit_constants_.empty());
   CheckBuffer();
   *reinterpret_cast<uint8_t*>(pc_) = data;
   pc_ += sizeof(uint8_t);
@@ -3838,8 +3865,8 @@
 void Assembler::dd(uint32_t data) {
   // dd is used to write raw data. The constant pool should be emitted or
   // blocked before using dd.
-  DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
-  DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
+  DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
+  DCHECK(is_const_pool_blocked() || pending_64_bit_constants_.empty());
   CheckBuffer();
   *reinterpret_cast<uint32_t*>(pc_) = data;
   pc_ += sizeof(uint32_t);
@@ -3849,8 +3876,8 @@
 void Assembler::dq(uint64_t value) {
   // dq is used to write raw data. The constant pool should be emitted or
   // blocked before using dq.
-  DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
-  DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
+  DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
+  DCHECK(is_const_pool_blocked() || pending_64_bit_constants_.empty());
   CheckBuffer();
   *reinterpret_cast<uint64_t*>(pc_) = value;
   pc_ += sizeof(uint64_t);
@@ -3893,21 +3920,12 @@
   if (FLAG_enable_embedded_constant_pool) {
     return constant_pool_builder_.AddEntry(position, value, sharing_ok);
   } else {
-    DCHECK(num_pending_32_bit_constants_ < kMaxNumPending32Constants);
-    if (num_pending_32_bit_constants_ == 0) {
+    DCHECK(pending_32_bit_constants_.size() < kMaxNumPending32Constants);
+    if (pending_32_bit_constants_.empty()) {
       first_const_pool_32_use_ = position;
-    } else if (num_pending_32_bit_constants_ == kMinNumPendingConstants &&
-               pending_32_bit_constants_ ==
-                   &pending_32_bit_constants_buffer_[0]) {
-      // Inline buffer is full, switch to dynamically allocated buffer.
-      pending_32_bit_constants_ =
-          new ConstantPoolEntry[kMaxNumPending32Constants];
-      std::copy(&pending_32_bit_constants_buffer_[0],
-                &pending_32_bit_constants_buffer_[kMinNumPendingConstants],
-                &pending_32_bit_constants_[0]);
     }
     ConstantPoolEntry entry(position, value, sharing_ok);
-    pending_32_bit_constants_[num_pending_32_bit_constants_++] = entry;
+    pending_32_bit_constants_.push_back(entry);
 
     // Make sure the constant pool is not emitted in place of the next
     // instruction for which we just recorded relocation info.
@@ -3922,21 +3940,12 @@
   if (FLAG_enable_embedded_constant_pool) {
     return constant_pool_builder_.AddEntry(position, value);
   } else {
-    DCHECK(num_pending_64_bit_constants_ < kMaxNumPending64Constants);
-    if (num_pending_64_bit_constants_ == 0) {
+    DCHECK(pending_64_bit_constants_.size() < kMaxNumPending64Constants);
+    if (pending_64_bit_constants_.empty()) {
       first_const_pool_64_use_ = position;
-    } else if (num_pending_64_bit_constants_ == kMinNumPendingConstants &&
-               pending_64_bit_constants_ ==
-                   &pending_64_bit_constants_buffer_[0]) {
-      // Inline buffer is full, switch to dynamically allocated buffer.
-      pending_64_bit_constants_ =
-          new ConstantPoolEntry[kMaxNumPending64Constants];
-      std::copy(&pending_64_bit_constants_buffer_[0],
-                &pending_64_bit_constants_buffer_[kMinNumPendingConstants],
-                &pending_64_bit_constants_[0]);
     }
     ConstantPoolEntry entry(position, value);
-    pending_64_bit_constants_[num_pending_64_bit_constants_++] = entry;
+    pending_64_bit_constants_.push_back(entry);
 
     // Make sure the constant pool is not emitted in place of the next
     // instruction for which we just recorded relocation info.
@@ -3949,8 +3958,8 @@
 void Assembler::BlockConstPoolFor(int instructions) {
   if (FLAG_enable_embedded_constant_pool) {
     // Should be a no-op if using an embedded constant pool.
-    DCHECK(num_pending_32_bit_constants_ == 0);
-    DCHECK(num_pending_64_bit_constants_ == 0);
+    DCHECK(pending_32_bit_constants_.empty());
+    DCHECK(pending_64_bit_constants_.empty());
     return;
   }
 
@@ -3959,11 +3968,11 @@
     // Max pool start (if we need a jump and an alignment).
 #ifdef DEBUG
     int start = pc_limit + kInstrSize + 2 * kPointerSize;
-    DCHECK((num_pending_32_bit_constants_ == 0) ||
+    DCHECK(pending_32_bit_constants_.empty() ||
            (start - first_const_pool_32_use_ +
-                num_pending_64_bit_constants_ * kDoubleSize <
+                pending_64_bit_constants_.size() * kDoubleSize <
             kMaxDistToIntPool));
-    DCHECK((num_pending_64_bit_constants_ == 0) ||
+    DCHECK(pending_64_bit_constants_.empty() ||
            (start - first_const_pool_64_use_ < kMaxDistToFPPool));
 #endif
     no_const_pool_before_ = pc_limit;
@@ -3978,8 +3987,8 @@
 void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
   if (FLAG_enable_embedded_constant_pool) {
     // Should be a no-op if using an embedded constant pool.
-    DCHECK(num_pending_32_bit_constants_ == 0);
-    DCHECK(num_pending_64_bit_constants_ == 0);
+    DCHECK(pending_32_bit_constants_.empty());
+    DCHECK(pending_64_bit_constants_.empty());
     return;
   }
 
@@ -3993,8 +4002,7 @@
   }
 
   // There is nothing to do if there are no pending constant pool entries.
-  if ((num_pending_32_bit_constants_ == 0) &&
-      (num_pending_64_bit_constants_ == 0)) {
+  if (pending_32_bit_constants_.empty() && pending_64_bit_constants_.empty()) {
     // Calculate the offset of the next check.
     next_buffer_check_ = pc_offset() + kCheckPoolInterval;
     return;
@@ -4006,9 +4014,9 @@
   int jump_instr = require_jump ? kInstrSize : 0;
   int size_up_to_marker = jump_instr + kInstrSize;
   int estimated_size_after_marker =
-      num_pending_32_bit_constants_ * kPointerSize;
-  bool has_int_values = (num_pending_32_bit_constants_ > 0);
-  bool has_fp_values = (num_pending_64_bit_constants_ > 0);
+      pending_32_bit_constants_.size() * kPointerSize;
+  bool has_int_values = !pending_32_bit_constants_.empty();
+  bool has_fp_values = !pending_64_bit_constants_.empty();
   bool require_64_bit_align = false;
   if (has_fp_values) {
     require_64_bit_align =
@@ -4017,7 +4025,8 @@
     if (require_64_bit_align) {
       estimated_size_after_marker += kInstrSize;
     }
-    estimated_size_after_marker += num_pending_64_bit_constants_ * kDoubleSize;
+    estimated_size_after_marker +=
+        pending_64_bit_constants_.size() * kDoubleSize;
   }
   int estimated_size = size_up_to_marker + estimated_size_after_marker;
 
@@ -4036,7 +4045,7 @@
       // The 64-bit constants are always emitted before the 32-bit constants, so
       // we can ignore the effect of the 32-bit constants on estimated_size.
       int dist64 = pc_offset() + estimated_size -
-                   num_pending_32_bit_constants_ * kPointerSize -
+                   pending_32_bit_constants_.size() * kPointerSize -
                    first_const_pool_64_use_;
       if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
           (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
@@ -4055,7 +4064,7 @@
 
   // Deduplicate constants.
   int size_after_marker = estimated_size_after_marker;
-  for (int i = 0; i < num_pending_64_bit_constants_; i++) {
+  for (int i = 0; i < pending_64_bit_constants_.size(); i++) {
     ConstantPoolEntry& entry = pending_64_bit_constants_[i];
     DCHECK(!entry.is_merged());
     for (int j = 0; j < i; j++) {
@@ -4068,7 +4077,7 @@
     }
   }
 
-  for (int i = 0; i < num_pending_32_bit_constants_; i++) {
+  for (int i = 0; i < pending_32_bit_constants_.size(); i++) {
     ConstantPoolEntry& entry = pending_32_bit_constants_[i];
     DCHECK(!entry.is_merged());
     if (!entry.sharing_ok()) continue;
@@ -4113,7 +4122,7 @@
 
     // Emit 64-bit constant pool entries first: their range is smaller than
     // 32-bit entries.
-    for (int i = 0; i < num_pending_64_bit_constants_; i++) {
+    for (int i = 0; i < pending_64_bit_constants_.size(); i++) {
       ConstantPoolEntry& entry = pending_64_bit_constants_[i];
 
       Instr instr = instr_at(entry.position());
@@ -4142,7 +4151,7 @@
     }
 
     // Emit 32-bit constant pool entries.
-    for (int i = 0; i < num_pending_32_bit_constants_; i++) {
+    for (int i = 0; i < pending_32_bit_constants_.size(); i++) {
       ConstantPoolEntry& entry = pending_32_bit_constants_[i];
       Instr instr = instr_at(entry.position());
 
@@ -4176,8 +4185,8 @@
       }
     }
 
-    num_pending_32_bit_constants_ = 0;
-    num_pending_64_bit_constants_ = 0;
+    pending_32_bit_constants_.clear();
+    pending_64_bit_constants_.clear();
     first_const_pool_32_use_ = -1;
     first_const_pool_64_use_ = -1;
 
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 26e062b..461d5b0 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -118,8 +118,6 @@
     Register r = {code};
     return r;
   }
-  const char* ToString();
-  bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
   bool is(Register reg) const { return reg_code == reg.reg_code; }
   int code() const {
@@ -147,9 +145,22 @@
 #undef DECLARE_REGISTER
 const Register no_reg = {Register::kCode_no_reg};
 
+static const bool kSimpleFPAliasing = false;
+
 // Single word VFP register.
 struct SwVfpRegister {
+  enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+    FLOAT_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+        kAfterLast,
+    kCode_no_reg = -1
+  };
+
+  static const int kMaxNumRegisters = Code::kAfterLast;
+
   static const int kSizeInBytes = 4;
+
   bool is_valid() const { return 0 <= reg_code && reg_code < 32; }
   bool is(SwVfpRegister reg) const { return reg_code == reg.reg_code; }
   int code() const {
@@ -195,8 +206,6 @@
   //  d15: scratch register.
   static const int kSizeInBytes = 8;
 
-  const char* ToString();
-  bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
   bool is(DwVfpRegister reg) const { return reg_code == reg.reg_code; }
   int code() const {
@@ -986,6 +995,14 @@
             Register src2,
             const MemOperand& dst, Condition cond = al);
 
+  // Load/Store exclusive instructions
+  void ldrex(Register dst, Register src, Condition cond = al);
+  void strex(Register src1, Register src2, Register dst, Condition cond = al);
+  void ldrexb(Register dst, Register src, Condition cond = al);
+  void strexb(Register src1, Register src2, Register dst, Condition cond = al);
+  void ldrexh(Register dst, Register src, Condition cond = al);
+  void strexh(Register src1, Register src2, Register dst, Condition cond = al);
+
   // Preload instructions
   void pld(const MemOperand& address);
 
@@ -1312,6 +1329,10 @@
     vstm(db_w, sp, src, src, cond);
   }
 
+  void vpush(SwVfpRegister src, Condition cond = al) {
+    vstm(db_w, sp, src, src, cond);
+  }
+
   void vpop(DwVfpRegister dst, Condition cond = al) {
     vldm(ia_w, sp, dst, dst, cond);
   }
@@ -1545,10 +1566,10 @@
       // Max pool start (if we need a jump and an alignment).
       int start = pc_offset() + kInstrSize + 2 * kPointerSize;
       // Check the constant pool hasn't been blocked for too long.
-      DCHECK((num_pending_32_bit_constants_ == 0) ||
-             (start + num_pending_64_bit_constants_ * kDoubleSize <
+      DCHECK(pending_32_bit_constants_.empty() ||
+             (start + pending_64_bit_constants_.size() * kDoubleSize <
               (first_const_pool_32_use_ + kMaxDistToIntPool)));
-      DCHECK((num_pending_64_bit_constants_ == 0) ||
+      DCHECK(pending_64_bit_constants_.empty() ||
              (start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
 #endif
       // Two cases:
@@ -1615,14 +1636,8 @@
   // pending relocation entry per instruction.
 
   // The buffers of pending constant pool entries.
-  ConstantPoolEntry pending_32_bit_constants_buffer_[kMinNumPendingConstants];
-  ConstantPoolEntry pending_64_bit_constants_buffer_[kMinNumPendingConstants];
-  ConstantPoolEntry* pending_32_bit_constants_;
-  ConstantPoolEntry* pending_64_bit_constants_;
-  // Number of pending constant pool entries in the 32 bits buffer.
-  int num_pending_32_bit_constants_;
-  // Number of pending constant pool entries in the 64 bits buffer.
-  int num_pending_64_bit_constants_;
+  std::vector<ConstantPoolEntry> pending_32_bit_constants_;
+  std::vector<ConstantPoolEntry> pending_64_bit_constants_;
 
   ConstantPoolBuilder constant_pool_builder_;
 
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 031b483..365bc1e 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -16,10 +16,7 @@
 
 #define __ ACCESS_MASM(masm)
 
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
-                                CFunctionId id,
-                                BuiltinExtraArguments extra_args) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
   // ----------- S t a t e -------------
   //  -- r0                 : number of arguments excluding receiver
   //  -- r1                 : target
@@ -38,23 +35,8 @@
   __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
   // Insert extra arguments.
-  int num_extra_args = 0;
-  switch (extra_args) {
-    case BuiltinExtraArguments::kTarget:
-      __ Push(r1);
-      ++num_extra_args;
-      break;
-    case BuiltinExtraArguments::kNewTarget:
-      __ Push(r3);
-      ++num_extra_args;
-      break;
-    case BuiltinExtraArguments::kTargetAndNewTarget:
-      __ Push(r1, r3);
-      num_extra_args += 2;
-      break;
-    case BuiltinExtraArguments::kNone:
-      break;
-  }
+  const int num_extra_args = 2;
+  __ Push(r1, r3);
 
   // JumpToExternalReference expects r0 to contain the number of arguments
   // including the receiver and the extra arguments.
@@ -140,6 +122,8 @@
 void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
   // ----------- S t a t e -------------
   //  -- r0                 : number of arguments
+  //  -- r1                 : function
+  //  -- cp                 : context
   //  -- lr                 : return address
   //  -- sp[(argc - n) * 8] : arg[n] (zero-based)
   //  -- sp[(argc + 1) * 8] : receiver
@@ -152,9 +136,9 @@
   DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1;
 
   // Load the accumulator with the default return value (either -Infinity or
-  // +Infinity), with the tagged value in r1 and the double value in d1.
-  __ LoadRoot(r1, root_index);
-  __ vldr(d1, FieldMemOperand(r1, HeapNumber::kValueOffset));
+  // +Infinity), with the tagged value in r5 and the double value in d1.
+  __ LoadRoot(r5, root_index);
+  __ vldr(d1, FieldMemOperand(r5, HeapNumber::kValueOffset));
 
   // Remember how many slots to drop (including the receiver).
   __ add(r4, r0, Operand(1));
@@ -170,33 +154,36 @@
     __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2));
 
     // Load the double value of the parameter into d2, maybe converting the
-    // parameter to a number first using the ToNumberStub if necessary.
+    // parameter to a number first using the ToNumber builtin if necessary.
     Label convert, convert_smi, convert_number, done_convert;
     __ bind(&convert);
     __ JumpIfSmi(r2, &convert_smi);
     __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
     __ JumpIfRoot(r3, Heap::kHeapNumberMapRootIndex, &convert_number);
     {
-      // Parameter is not a Number, use the ToNumberStub to convert it.
-      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      // Parameter is not a Number, use the ToNumber builtin to convert it.
+      DCHECK(!FLAG_enable_embedded_constant_pool);
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ Push(lr, fp, cp, r1);
+      __ add(fp, sp, Operand(2 * kPointerSize));
       __ SmiTag(r0);
       __ SmiTag(r4);
-      __ Push(r0, r1, r4);
+      __ Push(r0, r4, r5);
       __ mov(r0, r2);
-      ToNumberStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
       __ mov(r2, r0);
-      __ Pop(r0, r1, r4);
+      __ Pop(r0, r4, r5);
       {
         // Restore the double accumulator value (d1).
         Label done_restore;
-        __ SmiToDouble(d1, r1);
-        __ JumpIfSmi(r1, &done_restore);
-        __ vldr(d1, FieldMemOperand(r1, HeapNumber::kValueOffset));
+        __ SmiToDouble(d1, r5);
+        __ JumpIfSmi(r5, &done_restore);
+        __ vldr(d1, FieldMemOperand(r5, HeapNumber::kValueOffset));
         __ bind(&done_restore);
       }
       __ SmiUntag(r4);
       __ SmiUntag(r0);
+      __ Pop(lr, fp, cp, r1);
     }
     __ b(&convert);
     __ bind(&convert_number);
@@ -222,18 +209,18 @@
     // Result is on the right hand side.
     __ bind(&compare_swap);
     __ vmov(d1, d2);
-    __ mov(r1, r2);
+    __ mov(r5, r2);
     __ b(&loop);
 
     // At least one side is NaN, which means that the result will be NaN too.
     __ bind(&compare_nan);
-    __ LoadRoot(r1, Heap::kNanValueRootIndex);
-    __ vldr(d1, FieldMemOperand(r1, HeapNumber::kValueOffset));
+    __ LoadRoot(r5, Heap::kNanValueRootIndex);
+    __ vldr(d1, FieldMemOperand(r5, HeapNumber::kValueOffset));
     __ b(&loop);
   }
 
   __ bind(&done_loop);
-  __ mov(r0, r1);
+  __ mov(r0, r5);
   __ Drop(r4);
   __ Ret();
 }
@@ -259,8 +246,7 @@
   }
 
   // 2a. Convert the first argument to a number.
-  ToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
+  __ Jump(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
 
   // 2b. No arguments, return +0.
   __ bind(&no_arguments);
@@ -308,8 +294,7 @@
       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
       __ Push(r1, r3);
       __ Move(r0, r2);
-      ToNumberStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
       __ Move(r2, r0);
       __ Pop(r1, r3);
     }
@@ -708,8 +693,8 @@
   __ AssertGeneratorObject(r1);
 
   // Store input value into generator object.
-  __ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOffset));
-  __ RecordWriteField(r1, JSGeneratorObject::kInputOffset, r0, r3,
+  __ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOrDebugPosOffset));
+  __ RecordWriteField(r1, JSGeneratorObject::kInputOrDebugPosOffset, r0, r3,
                       kLRHasNotBeenSaved, kDontSaveFPRegs);
 
   // Store resume mode into generator object.
@@ -720,21 +705,24 @@
   __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
 
   // Flood function if we are stepping.
-  Label skip_flooding;
-  ExternalReference step_in_enabled =
-      ExternalReference::debug_step_in_enabled_address(masm->isolate());
-  __ mov(ip, Operand(step_in_enabled));
-  __ ldrb(ip, MemOperand(ip));
-  __ cmp(ip, Operand(0));
-  __ b(eq, &skip_flooding);
-  {
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ Push(r1, r2, r4);
-    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
-    __ Pop(r1, r2);
-    __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
-  }
-  __ bind(&skip_flooding);
+  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+  Label stepping_prepared;
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(masm->isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  __ mov(ip, Operand(last_step_action));
+  __ ldrsb(ip, MemOperand(ip));
+  __ cmp(ip, Operand(StepIn));
+  __ b(ge, &prepare_step_in_if_stepping);
+
+  // Flood function if we need to continue stepping in the suspended generator.
+  ExternalReference debug_suspended_generator =
+      ExternalReference::debug_suspended_generator_address(masm->isolate());
+  __ mov(ip, Operand(debug_suspended_generator));
+  __ ldr(ip, MemOperand(ip));
+  __ cmp(ip, Operand(r1));
+  __ b(eq, &prepare_step_in_suspended_generator);
+  __ bind(&stepping_prepared);
 
   // Push receiver.
   __ ldr(ip, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
@@ -830,6 +818,26 @@
     __ Move(r0, r1);  // Continuation expects generator object in r0.
     __ Jump(r3);
   }
+
+  __ bind(&prepare_step_in_if_stepping);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r1, r2, r4);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(r1, r2);
+    __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
+  }
+  __ b(&stepping_prepared);
+
+  __ bind(&prepare_step_in_suspended_generator);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r1, r2);
+    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+    __ Pop(r1, r2);
+    __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
+  }
+  __ b(&stepping_prepared);
 }
 
 void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
@@ -959,6 +967,22 @@
   Generate_JSEntryTrampolineHelper(masm, true);
 }
 
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
+  Register args_count = scratch;
+
+  // Get the arguments + receiver count.
+  __ ldr(args_count,
+         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ ldr(args_count,
+         FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+  // Drop receiver + arguments.
+  __ add(sp, sp, args_count, LeaveCC);
+}
+
 // Generate code for entering a JS function with the interpreter.
 // On entry to the function the receiver and arguments have been pushed on the
 // stack left to right.  The actual argument count matches the formal parameter
@@ -1062,15 +1086,7 @@
   masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
 
   // The return value is in r0.
-
-  // Get the arguments + reciever count.
-  __ ldr(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ ldr(r2, FieldMemOperand(r2, BytecodeArray::kParameterSizeOffset));
-
-  // Leave the frame (also dropping the register file).
-  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
-
-  __ add(sp, sp, r2, LeaveCC);
+  LeaveInterpreterFrame(masm, r2);
   __ Jump(lr);
 
   // If the bytecode array is no longer present, then the underlying function
@@ -1086,6 +1102,31 @@
   __ Jump(r4);
 }
 
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+  // Save the function and context for call to CompileBaseline.
+  __ ldr(r1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+  __ ldr(kContextRegister,
+         MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  // Leave the frame before recompiling for baseline so that we don't count as
+  // an activation on the stack.
+  LeaveInterpreterFrame(masm, r2);
+
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Push return value.
+    __ push(r0);
+
+    // Push function as argument and compile for baseline.
+    __ push(r1);
+    __ CallRuntime(Runtime::kCompileBaseline);
+
+    // Restore return value.
+    __ pop(r0);
+  }
+  __ Jump(lr);
+}
+
 static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
                                          Register limit, Register scratch) {
   Label loop_header, loop_check;
@@ -1242,13 +1283,29 @@
   const int bailout_id = BailoutId::None().ToInt();
   __ cmp(temp, Operand(Smi::FromInt(bailout_id)));
   __ b(ne, &loop_bottom);
+
   // Literals available?
+  Label got_literals, maybe_cleared_weakcell;
   __ ldr(temp, FieldMemOperand(array_pointer,
                                SharedFunctionInfo::kOffsetToPreviousLiterals));
+  // temp contains either a WeakCell pointing to the literals array or the
+  // literals array directly.
+  STATIC_ASSERT(WeakCell::kValueOffset == FixedArray::kLengthOffset);
+  __ ldr(r4, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(r4, &maybe_cleared_weakcell);
+  // r4 is a pointer, therefore temp is a WeakCell pointing to a literals array.
   __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
-  __ JumpIfSmi(temp, &gotta_call_runtime);
+  __ jmp(&got_literals);
+
+  // r4 is a smi. If it's 0, then we are looking at a cleared WeakCell
+  // around the literals array, and we should visit the runtime. If it's > 0,
+  // then temp already contains the literals array.
+  __ bind(&maybe_cleared_weakcell);
+  __ cmp(r4, Operand(Smi::FromInt(0)));
+  __ b(eq, &gotta_call_runtime);
 
   // Save the literals in the closure.
+  __ bind(&got_literals);
   __ ldr(r4, MemOperand(sp, 0));
   __ str(temp, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
   __ push(index);
@@ -1659,6 +1716,9 @@
 void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
                                                int field_index) {
   // ----------- S t a t e -------------
+  //  -- r0    : number of arguments
+  //  -- r1    : function
+  //  -- cp    : context
   //  -- lr    : return address
   //  -- sp[0] : receiver
   // -----------------------------------
@@ -1668,7 +1728,7 @@
   {
     __ Pop(r0);
     __ JumpIfSmi(r0, &receiver_not_date);
-    __ CompareObjectType(r0, r1, r2, JS_DATE_TYPE);
+    __ CompareObjectType(r0, r2, r3, JS_DATE_TYPE);
     __ b(ne, &receiver_not_date);
   }
 
@@ -1698,7 +1758,14 @@
 
   // 3. Raise a TypeError if the receiver is not a date.
   __ bind(&receiver_not_date);
-  __ TailCallRuntime(Runtime::kThrowNotDateError);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Push(r0, lr, fp);
+    __ Move(fp, sp);
+    __ Push(cp, r1);
+    __ Push(Smi::FromInt(0));
+    __ CallRuntime(Runtime::kThrowNotDateError);
+  }
 }
 
 // static
@@ -2618,6 +2685,73 @@
   __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
 }
 
+// static
+void Builtins::Generate_StringToNumber(MacroAssembler* masm) {
+  // The StringToNumber stub takes one argument in r0.
+  __ AssertString(r0);
+
+  // Check if string has a cached array index.
+  Label runtime;
+  __ ldr(r2, FieldMemOperand(r0, String::kHashFieldOffset));
+  __ tst(r2, Operand(String::kContainsCachedArrayIndexMask));
+  __ b(ne, &runtime);
+  __ IndexFromHash(r2, r0);
+  __ Ret();
+
+  __ bind(&runtime);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    // Push argument.
+    __ Push(r0);
+    // We cannot use a tail call here because this builtin can also be called
+    // from wasm.
+    __ CallRuntime(Runtime::kStringToNumber);
+  }
+  __ Ret();
+}
+
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in r0.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ tst(r0, Operand(kSmiTagMask));
+  __ Ret(eq);
+
+  __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+  // r0: receiver
+  // r1: receiver instance type
+  __ Ret(eq);
+
+  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+          RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_NonNumberToNumber(MacroAssembler* masm) {
+  // The NonNumberToNumber stub takes one argument in r0.
+  __ AssertNotNumber(r0);
+
+  __ CompareObjectType(r0, r1, r1, FIRST_NONSTRING_TYPE);
+  // r0: receiver
+  // r1: receiver instance type
+  __ Jump(masm->isolate()->builtins()->StringToNumber(), RelocInfo::CODE_TARGET,
+          lo);
+
+  Label not_oddball;
+  __ cmp(r1, Operand(ODDBALL_TYPE));
+  __ b(ne, &not_oddball);
+  __ ldr(r0, FieldMemOperand(r0, Oddball::kToNumberOffset));
+  __ Ret();
+  __ bind(&not_oddball);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    // Push argument.
+    __ Push(r0);
+    // We cannot use a tail call here because this builtin can also be called
+    // from wasm.
+    __ CallRuntime(Runtime::kToNumber);
+  }
+  __ Ret();
+}
+
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r0 : actual number of arguments
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 0224f9d..0ef31d7 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -22,70 +22,28 @@
 namespace v8 {
 namespace internal {
 
+#define __ ACCESS_MASM(masm)
 
-static void InitializeArrayConstructorDescriptor(
-    Isolate* isolate, CodeStubDescriptor* descriptor,
-    int constant_stack_parameter_count) {
-  Address deopt_handler = Runtime::FunctionForId(
-      Runtime::kArrayConstructor)->entry;
-
-  if (constant_stack_parameter_count == 0) {
-    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  } else {
-    descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  }
+void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
+  __ lsl(r5, r0, Operand(kPointerSizeLog2));
+  __ str(r1, MemOperand(sp, r5));
+  __ Push(r1);
+  __ Push(r2);
+  __ add(r0, r0, Operand(3));
+  __ TailCallRuntime(Runtime::kNewArray);
 }
 
-
-static void InitializeInternalArrayConstructorDescriptor(
-    Isolate* isolate, CodeStubDescriptor* descriptor,
-    int constant_stack_parameter_count) {
-  Address deopt_handler = Runtime::FunctionForId(
-      Runtime::kInternalArrayConstructor)->entry;
-
-  if (constant_stack_parameter_count == 0) {
-    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  } else {
-    descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  }
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
   descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
 }
 
-void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+void FastFunctionBindStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
+  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
+  descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
 }
 
-
-void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                           Condition cond);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
@@ -942,7 +900,7 @@
   CEntryStub::GenerateAheadOfTime(isolate);
   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
-  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
   CreateWeakCellStub::GenerateAheadOfTime(isolate);
   BinaryOpICStub::GenerateAheadOfTime(isolate);
@@ -1354,7 +1312,6 @@
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
                                           &miss,  // When index out of range.
-                                          STRING_INDEX_IS_ARRAY_INDEX,
                                           RECEIVER_IS_STRING);
   char_at_generator.GenerateFast(masm);
   __ Ret();
@@ -1798,6 +1755,7 @@
   // r2 : feedback vector
   // r3 : slot in feedback vector (Smi)
   Label initialize, done, miss, megamorphic, not_array_function;
+  Label done_initialize_count, done_increment_count;
 
   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
@@ -1817,7 +1775,7 @@
   Register weak_value = r9;
   __ ldr(weak_value, FieldMemOperand(r5, WeakCell::kValueOffset));
   __ cmp(r1, weak_value);
-  __ b(eq, &done);
+  __ b(eq, &done_increment_count);
   __ CompareRoot(r5, Heap::kmegamorphic_symbolRootIndex);
   __ b(eq, &done);
   __ ldr(feedback_map, FieldMemOperand(r5, HeapObject::kMapOffset));
@@ -1840,7 +1798,7 @@
   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
   __ cmp(r1, r5);
   __ b(ne, &megamorphic);
-  __ jmp(&done);
+  __ jmp(&done_increment_count);
 
   __ bind(&miss);
 
@@ -1869,11 +1827,28 @@
   // slot.
   CreateAllocationSiteStub create_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &create_stub);
-  __ b(&done);
+  __ b(&done_initialize_count);
 
   __ bind(&not_array_function);
   CreateWeakCellStub weak_cell_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &weak_cell_stub);
+
+  __ bind(&done_initialize_count);
+  // Initialize the call counter.
+  __ Move(r5, Operand(Smi::FromInt(1)));
+  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+  __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + kPointerSize));
+  __ b(&done);
+
+  __ bind(&done_increment_count);
+
+  // Increment the call count for monomorphic function calls.
+  __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
+  __ add(r5, r5, Operand(FixedArray::kHeaderSize + kPointerSize));
+  __ ldr(r4, FieldMemOperand(r5, 0));
+  __ add(r4, r4, Operand(Smi::FromInt(1)));
+  __ str(r4, FieldMemOperand(r5, 0));
+
   __ bind(&done);
 }
 
@@ -1935,7 +1910,7 @@
   __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
   __ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
   __ ldr(r3, FieldMemOperand(r2, 0));
-  __ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+  __ add(r3, r3, Operand(Smi::FromInt(1)));
   __ str(r3, FieldMemOperand(r2, 0));
 
   __ mov(r2, r4);
@@ -1983,7 +1958,7 @@
   __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
   __ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
   __ ldr(r3, FieldMemOperand(r2, 0));
-  __ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+  __ add(r3, r3, Operand(Smi::FromInt(1)));
   __ str(r3, FieldMemOperand(r2, 0));
 
   __ bind(&call_function);
@@ -2054,7 +2029,7 @@
   __ b(ne, &miss);
 
   // Initialize the call counter.
-  __ Move(r5, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+  __ Move(r5, Operand(Smi::FromInt(1)));
   __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
   __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + kPointerSize));
 
@@ -2152,13 +2127,7 @@
     // index_ is consumed by runtime conversion function.
     __ Push(object_, index_);
   }
-  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
-    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
-  } else {
-    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
-    // NumberToSmi discards numbers that are not exact integers.
-    __ CallRuntime(Runtime::kNumberToSmi);
-  }
+  __ CallRuntime(Runtime::kNumberToSmi);
   // Save the conversion result before the pop instructions below
   // have a chance to overwrite it.
   __ Move(index_, r0);
@@ -2488,67 +2457,13 @@
   // r3: from index (untagged)
   __ SmiTag(r3, r3);
   StringCharAtGenerator generator(r0, r3, r2, r0, &runtime, &runtime, &runtime,
-                                  STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
+                                  RECEIVER_IS_STRING);
   generator.GenerateFast(masm);
   __ Drop(3);
   __ Ret();
   generator.SkipSlow(masm, &runtime);
 }
 
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in r0.
-  STATIC_ASSERT(kSmiTag == 0);
-  __ tst(r0, Operand(kSmiTagMask));
-  __ Ret(eq);
-
-  __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
-  // r0: receiver
-  // r1: receiver instance type
-  __ Ret(eq);
-
-  NonNumberToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-}
-
-void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
-  // The NonNumberToNumber stub takes one argument in r0.
-  __ AssertNotNumber(r0);
-
-  __ CompareObjectType(r0, r1, r1, FIRST_NONSTRING_TYPE);
-  // r0: receiver
-  // r1: receiver instance type
-  StringToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub, lo);
-
-  Label not_oddball;
-  __ cmp(r1, Operand(ODDBALL_TYPE));
-  __ b(ne, &not_oddball);
-  __ ldr(r0, FieldMemOperand(r0, Oddball::kToNumberOffset));
-  __ Ret();
-  __ bind(&not_oddball);
-
-  __ Push(r0);  // Push argument.
-  __ TailCallRuntime(Runtime::kToNumber);
-}
-
-void StringToNumberStub::Generate(MacroAssembler* masm) {
-  // The StringToNumber stub takes one argument in r0.
-  __ AssertString(r0);
-
-  // Check if string has a cached array index.
-  Label runtime;
-  __ ldr(r2, FieldMemOperand(r0, String::kHashFieldOffset));
-  __ tst(r2, Operand(String::kContainsCachedArrayIndexMask));
-  __ b(ne, &runtime);
-  __ IndexFromHash(r2, r0);
-  __ Ret();
-
-  __ bind(&runtime);
-  __ Push(r0);  // Push argument.
-  __ TailCallRuntime(Runtime::kStringToNumber);
-}
-
 void ToStringStub::Generate(MacroAssembler* masm) {
   // The ToString stub takes one argument in r0.
   Label is_number;
@@ -2714,7 +2629,7 @@
   // Load r2 with the allocation site.  We stick an undefined dummy value here
   // and replace it with the real allocation site later when we instantiate this
   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
-  __ Move(r2, handle(isolate()->heap()->undefined_value()));
+  __ Move(r2, isolate()->factory()->undefined_value());
 
   // Make sure that we actually patched the allocation site.
   if (FLAG_debug_code) {
@@ -3559,14 +3474,14 @@
 
 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  LoadICStub stub(isolate(), state());
+  LoadICStub stub(isolate());
   stub.GenerateForTrampoline(masm);
 }
 
 
 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  KeyedLoadICStub stub(isolate(), state());
+  KeyedLoadICStub stub(isolate());
   stub.GenerateForTrampoline(masm);
 }
 
@@ -4196,19 +4111,13 @@
   }
 }
 
-
-void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
       isolate);
   ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
       isolate);
-  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
-      isolate);
-}
-
-
-void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
-    Isolate* isolate) {
+  ArrayNArgumentsConstructorStub stub(isolate);
+  stub.GetCode();
   ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
   for (int i = 0; i < 2; i++) {
     // For internal arrays we only need a few things
@@ -4216,8 +4125,6 @@
     stubh1.GetCode();
     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
     stubh2.GetCode();
-    InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
-    stubh3.GetCode();
   }
 }
 
@@ -4237,13 +4144,15 @@
     CreateArrayDispatchOneArgument(masm, mode);
 
     __ bind(&not_one_case);
-    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+    ArrayNArgumentsConstructorStub stub(masm->isolate());
+    __ TailCallStub(&stub);
   } else if (argument_count() == NONE) {
     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   } else if (argument_count() == ONE) {
     CreateArrayDispatchOneArgument(masm, mode);
   } else if (argument_count() == MORE_THAN_ONE) {
-    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+    ArrayNArgumentsConstructorStub stub(masm->isolate());
+    __ TailCallStub(&stub);
   } else {
     UNREACHABLE();
   }
@@ -4325,7 +4234,7 @@
   InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
   __ TailCallStub(&stub0, lo);
 
-  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+  ArrayNArgumentsConstructorStub stubN(isolate());
   __ TailCallStub(&stubN, hi);
 
   if (IsFastPackedElementsKind(kind)) {
@@ -4547,10 +4456,10 @@
   // specified by the function's internal formal parameter count.
   Label rest_parameters;
   __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-  __ ldr(r1,
-         FieldMemOperand(r1, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ sub(r0, r0, r1, SetCC);
+  __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r3,
+         FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ sub(r0, r0, r3, SetCC);
   __ b(gt, &rest_parameters);
 
   // Return an empty rest parameter array.
@@ -4597,15 +4506,16 @@
     // ----------- S t a t e -------------
     //  -- cp : context
     //  -- r0 : number of rest parameters (tagged)
+    //  -- r1 : function
     //  -- r2 : pointer to first rest parameters
     //  -- lr : return address
     // -----------------------------------
 
     // Allocate space for the rest parameter array plus the backing store.
     Label allocate, done_allocate;
-    __ mov(r1, Operand(JSArray::kSize + FixedArray::kHeaderSize));
-    __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - 1));
-    __ Allocate(r1, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
+    __ mov(r6, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+    __ add(r6, r6, Operand(r0, LSL, kPointerSizeLog2 - 1));
+    __ Allocate(r6, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Setup the elements array in r3.
@@ -4637,17 +4547,25 @@
     __ mov(r0, r4);
     __ Ret();
 
-    // Fall back to %AllocateInNewSpace.
+    // Fall back to %AllocateInNewSpace (if not too big).
+    Label too_big_for_new_space;
     __ bind(&allocate);
+    __ cmp(r6, Operand(Page::kMaxRegularHeapObjectSize));
+    __ b(gt, &too_big_for_new_space);
     {
       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-      __ SmiTag(r1);
-      __ Push(r0, r2, r1);
+      __ SmiTag(r6);
+      __ Push(r0, r2, r6);
       __ CallRuntime(Runtime::kAllocateInNewSpace);
       __ mov(r3, r0);
       __ Pop(r0, r2);
     }
     __ jmp(&done_allocate);
+
+    // Fall back to %NewRestParameter.
+    __ bind(&too_big_for_new_space);
+    __ push(r1);
+    __ TailCallRuntime(Runtime::kNewRestParameter);
   }
 }
 
@@ -4906,9 +4824,9 @@
   __ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ b(eq, &arguments_adaptor);
   {
-    __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+    __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
     __ ldr(r0, FieldMemOperand(
-                   r1, SharedFunctionInfo::kFormalParameterCountOffset));
+                   r4, SharedFunctionInfo::kFormalParameterCountOffset));
     __ add(r2, r2, Operand(r0, LSL, kPointerSizeLog2 - 1));
     __ add(r2, r2,
            Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
@@ -4926,15 +4844,16 @@
   // ----------- S t a t e -------------
   //  -- cp : context
   //  -- r0 : number of rest parameters (tagged)
+  //  -- r1 : function
   //  -- r2 : pointer to first rest parameters
   //  -- lr : return address
   // -----------------------------------
 
   // Allocate space for the strict arguments object plus the backing store.
   Label allocate, done_allocate;
-  __ mov(r1, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
-  __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - 1));
-  __ Allocate(r1, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
+  __ mov(r6, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+  __ add(r6, r6, Operand(r0, LSL, kPointerSizeLog2 - 1));
+  __ Allocate(r6, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
   __ bind(&done_allocate);
 
   // Setup the elements array in r3.
@@ -4966,44 +4885,25 @@
   __ mov(r0, r4);
   __ Ret();
 
-  // Fall back to %AllocateInNewSpace.
+  // Fall back to %AllocateInNewSpace (if not too big).
+  Label too_big_for_new_space;
   __ bind(&allocate);
+  __ cmp(r6, Operand(Page::kMaxRegularHeapObjectSize));
+  __ b(gt, &too_big_for_new_space);
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ SmiTag(r1);
-    __ Push(r0, r2, r1);
+    __ SmiTag(r6);
+    __ Push(r0, r2, r6);
     __ CallRuntime(Runtime::kAllocateInNewSpace);
     __ mov(r3, r0);
     __ Pop(r0, r2);
   }
   __ b(&done_allocate);
-}
 
-
-void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
-  Register context = cp;
-  Register result = r0;
-  Register slot = r2;
-
-  // Go up the context chain to the script context.
-  for (int i = 0; i < depth(); ++i) {
-    __ ldr(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
-    context = result;
-  }
-
-  // Load the PropertyCell value at the specified slot.
-  __ add(result, context, Operand(slot, LSL, kPointerSizeLog2));
-  __ ldr(result, ContextMemOperand(result));
-  __ ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
-
-  // If the result is not the_hole, return. Otherwise, handle in the runtime.
-  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
-  __ Ret(ne);
-
-  // Fallback to runtime.
-  __ SmiTag(slot);
-  __ push(slot);
-  __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+  // Fall back to %NewStrictArguments.
+  __ bind(&too_big_for_new_space);
+  __ push(r1);
+  __ TailCallRuntime(Runtime::kNewStrictArguments);
 }
 
 
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 4014aba..a7b38ff 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -16,68 +16,6 @@
 
 #define __ masm.
 
-
-#if defined(USE_SIMULATOR)
-byte* fast_exp_arm_machine_code = nullptr;
-double fast_exp_simulator(double x, Isolate* isolate) {
-  return Simulator::current(isolate)
-      ->CallFPReturnsDouble(fast_exp_arm_machine_code, x, 0);
-}
-#endif
-
-
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
-  size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
-  if (buffer == nullptr) return nullptr;
-  ExternalReference::InitializeMathExpData();
-
-  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
-                      CodeObjectRequired::kNo);
-
-  {
-    DwVfpRegister input = d0;
-    DwVfpRegister result = d1;
-    DwVfpRegister double_scratch1 = d2;
-    DwVfpRegister double_scratch2 = d3;
-    Register temp1 = r4;
-    Register temp2 = r5;
-    Register temp3 = r6;
-
-    if (masm.use_eabi_hardfloat()) {
-      // Input value is in d0 anyway, nothing to do.
-    } else {
-      __ vmov(input, r0, r1);
-    }
-    __ Push(temp3, temp2, temp1);
-    MathExpGenerator::EmitMathExp(
-        &masm, input, result, double_scratch1, double_scratch2,
-        temp1, temp2, temp3);
-    __ Pop(temp3, temp2, temp1);
-    if (masm.use_eabi_hardfloat()) {
-      __ vmov(d0, result);
-    } else {
-      __ vmov(r0, r1, result);
-    }
-    __ Ret();
-  }
-
-  CodeDesc desc;
-  masm.GetCode(&desc);
-  DCHECK(!RelocInfo::RequiresRelocation(desc));
-
-  Assembler::FlushICache(isolate, buffer, actual_size);
-  base::OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
-  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
-#else
-  fast_exp_arm_machine_code = buffer;
-  return &fast_exp_simulator;
-#endif
-}
-
 #if defined(V8_HOST_ARCH_ARM)
 MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
                                                 MemCopyUint8Function stub) {
@@ -794,94 +732,6 @@
   __ bind(&done);
 }
 
-
-static MemOperand ExpConstant(int index, Register base) {
-  return MemOperand(base, index * kDoubleSize);
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
-                                   DwVfpRegister input,
-                                   DwVfpRegister result,
-                                   DwVfpRegister double_scratch1,
-                                   DwVfpRegister double_scratch2,
-                                   Register temp1,
-                                   Register temp2,
-                                   Register temp3) {
-  DCHECK(!input.is(result));
-  DCHECK(!input.is(double_scratch1));
-  DCHECK(!input.is(double_scratch2));
-  DCHECK(!result.is(double_scratch1));
-  DCHECK(!result.is(double_scratch2));
-  DCHECK(!double_scratch1.is(double_scratch2));
-  DCHECK(!temp1.is(temp2));
-  DCHECK(!temp1.is(temp3));
-  DCHECK(!temp2.is(temp3));
-  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
-  DCHECK(!masm->serializer_enabled());  // External references not serializable.
-
-  Label zero, infinity, done;
-
-  __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
-
-  __ vldr(double_scratch1, ExpConstant(0, temp3));
-  __ VFPCompareAndSetFlags(double_scratch1, input);
-  __ b(ge, &zero);
-
-  __ vldr(double_scratch2, ExpConstant(1, temp3));
-  __ VFPCompareAndSetFlags(input, double_scratch2);
-  __ b(ge, &infinity);
-
-  __ vldr(double_scratch1, ExpConstant(3, temp3));
-  __ vldr(result, ExpConstant(4, temp3));
-  __ vmul(double_scratch1, double_scratch1, input);
-  __ vadd(double_scratch1, double_scratch1, result);
-  __ VmovLow(temp2, double_scratch1);
-  __ vsub(double_scratch1, double_scratch1, result);
-  __ vldr(result, ExpConstant(6, temp3));
-  __ vldr(double_scratch2, ExpConstant(5, temp3));
-  __ vmul(double_scratch1, double_scratch1, double_scratch2);
-  __ vsub(double_scratch1, double_scratch1, input);
-  __ vsub(result, result, double_scratch1);
-  __ vmul(double_scratch2, double_scratch1, double_scratch1);
-  __ vmul(result, result, double_scratch2);
-  __ vldr(double_scratch2, ExpConstant(7, temp3));
-  __ vmul(result, result, double_scratch2);
-  __ vsub(result, result, double_scratch1);
-  // Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1.
-  DCHECK(*reinterpret_cast<double*>
-         (ExternalReference::math_exp_constants(8).address()) == 1);
-  __ vmov(double_scratch2, 1);
-  __ vadd(result, result, double_scratch2);
-  __ mov(temp1, Operand(temp2, LSR, 11));
-  __ Ubfx(temp2, temp2, 0, 11);
-  __ add(temp1, temp1, Operand(0x3ff));
-
-  // Must not call ExpConstant() after overwriting temp3!
-  __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
-  __ add(temp3, temp3, Operand(temp2, LSL, 3));
-  __ ldm(ia, temp3, temp2.bit() | temp3.bit());
-  // The first word is loaded is the lower number register.
-  if (temp2.code() < temp3.code()) {
-    __ orr(temp1, temp3, Operand(temp1, LSL, 20));
-    __ vmov(double_scratch1, temp2, temp1);
-  } else {
-    __ orr(temp1, temp2, Operand(temp1, LSL, 20));
-    __ vmov(double_scratch1, temp3, temp1);
-  }
-  __ vmul(result, result, double_scratch1);
-  __ b(&done);
-
-  __ bind(&zero);
-  __ vmov(result, kDoubleRegZero);
-  __ b(&done);
-
-  __ bind(&infinity);
-  __ vldr(result, ExpConstant(2, temp3));
-
-  __ bind(&done);
-}
-
 #undef __
 
 #ifdef DEBUG
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 880825a..0086739 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -28,22 +28,6 @@
 };
 
 
-class MathExpGenerator : public AllStatic {
- public:
-  // Register input isn't modified. All other registers are clobbered.
-  static void EmitMathExp(MacroAssembler* masm,
-                          DwVfpRegister input,
-                          DwVfpRegister result,
-                          DwVfpRegister double_scratch1,
-                          DwVfpRegister double_scratch2,
-                          Register temp1,
-                          Register temp2,
-                          Register temp3);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 2785b75..c569e66 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -66,15 +66,12 @@
     Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
     // We need calls to have a predictable size in the unoptimized code, but
     // this is optimized code, so we don't have to have a predictable size.
-    int call_size_in_bytes =
-        MacroAssembler::CallSizeNotPredictableCodeSize(isolate,
-                                                       deopt_entry,
-                                                       RelocInfo::NONE32);
+    int call_size_in_bytes = MacroAssembler::CallDeoptimizerSize();
     int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
     DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
     DCHECK(call_size_in_bytes <= patch_size());
     CodePatcher patcher(isolate, call_address, call_size_in_words);
-    patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
+    patcher.masm()->CallDeoptimizer(deopt_entry);
     DCHECK(prev_call_address == NULL ||
            call_address >= prev_call_address + patch_size());
     DCHECK(call_address + patch_size() <= code->instruction_end());
@@ -189,8 +186,7 @@
   // Copy VFP registers to
   // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
   int double_regs_offset = FrameDescription::double_registers_offset();
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
     int code = config->GetAllocatableDoubleCode(i);
     int dst_offset = code * kDoubleSize + double_regs_offset;
@@ -307,15 +303,50 @@
 void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
   // Create a sequence of deoptimization entries.
   // Note that registers are still live when jumping to an entry.
-  Label done;
-  for (int i = 0; i < count(); i++) {
-    int start = masm()->pc_offset();
-    USE(start);
-    __ mov(ip, Operand(i));
-    __ b(&done);
-    DCHECK(masm()->pc_offset() - start == table_entry_size_);
+
+  // We need to be able to generate immediates up to kMaxNumberOfEntries. On
+  // ARMv7, we can use movw (with a maximum immediate of 0xffff). On ARMv6, we
+  // need two instructions.
+  STATIC_ASSERT((kMaxNumberOfEntries - 1) <= 0xffff);
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    CpuFeatureScope scope(masm(), ARMv7);
+    Label done;
+    for (int i = 0; i < count(); i++) {
+      int start = masm()->pc_offset();
+      USE(start);
+      __ movw(ip, i);
+      __ b(&done);
+      DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
+    }
+    __ bind(&done);
+  } else {
+    // We want to keep table_entry_size_ == 8 (since this is the common case),
+    // but we need two instructions to load most immediates over 0xff. To handle
+    // this, we set the low byte in the main table, and then set the high byte
+    // in a separate table if necessary.
+    Label high_fixes[256];
+    int high_fix_max = (count() - 1) >> 8;
+    DCHECK_GT(arraysize(high_fixes), high_fix_max);
+    for (int i = 0; i < count(); i++) {
+      int start = masm()->pc_offset();
+      USE(start);
+      __ mov(ip, Operand(i & 0xff));  // Set the low byte.
+      __ b(&high_fixes[i >> 8]);      // Jump to the secondary table.
+      DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
+    }
+    // Generate the secondary table, to set the high byte.
+    for (int high = 1; high <= high_fix_max; high++) {
+      __ bind(&high_fixes[high]);
+      __ orr(ip, ip, Operand(high << 8));
+      // If this isn't the last entry, emit a branch to the end of the table.
+      // The last entry can just fall through.
+      if (high < high_fix_max) __ b(&high_fixes[0]);
+    }
+    // Bind high_fixes[0] last, for indices like 0x00**. This case requires no
+    // fix-up, so for (common) small tables we can jump here, then just fall
+    // through with no additional branch.
+    __ bind(&high_fixes[0]);
   }
-  __ bind(&done);
   __ push(ip);
 }
 
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 20a898e..1bb33fa 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -40,6 +40,7 @@
 namespace v8 {
 namespace internal {
 
+const auto GetRegConfig = RegisterConfiguration::Crankshaft;
 
 //------------------------------------------------------------------------------
 
@@ -755,7 +756,45 @@
           Format(instr, "'um'al'cond's 'rd, 'rn, 'rm, 'rs");
         }
       } else {
-        Unknown(instr);  // not used by V8
+        if (instr->Bits(24, 23) == 3) {
+          if (instr->Bit(20) == 1) {
+            // ldrex
+            switch (instr->Bits(22, 21)) {
+              case 0:
+                Format(instr, "ldrex'cond 'rt, ['rn]");
+                break;
+              case 2:
+                Format(instr, "ldrexb'cond 'rt, ['rn]");
+                break;
+              case 3:
+                Format(instr, "ldrexh'cond 'rt, ['rn]");
+                break;
+              default:
+                UNREACHABLE();
+                break;
+            }
+          } else {
+            // strex
+            // The instruction is documented as strex rd, rt, [rn], but the
+            // "rt" register is using the rm bits.
+            switch (instr->Bits(22, 21)) {
+              case 0:
+                Format(instr, "strex'cond 'rd, 'rm, ['rn]");
+                break;
+              case 2:
+                Format(instr, "strexb'cond 'rd, 'rm, ['rn]");
+                break;
+              case 3:
+                Format(instr, "strexh'cond 'rd, 'rm, ['rn]");
+                break;
+              default:
+                UNREACHABLE();
+                break;
+            }
+          }
+        } else {
+          Unknown(instr);  // not used by V8
+        }
       }
     } else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xd) == 0xd)) {
       // ldrd, strd
@@ -2010,7 +2049,7 @@
 
 
 const char* NameConverter::NameOfAddress(byte* addr) const {
-  v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+  v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
   return tmp_buffer_.start();
 }
 
@@ -2021,7 +2060,7 @@
 
 
 const char* NameConverter::NameOfCPURegister(int reg) const {
-  return v8::internal::Register::from_code(reg).ToString();
+  return v8::internal::GetRegConfig()->GetGeneralRegisterName(reg);
 }
 
 
@@ -2073,9 +2112,8 @@
     buffer[0] = '\0';
     byte* prev_pc = pc;
     pc += d.InstructionDecode(buffer, pc);
-    v8::internal::PrintF(
-        f, "%p    %08x      %s\n",
-        prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+    v8::internal::PrintF(f, "%p    %08x      %s\n", static_cast<void*>(prev_pc),
+                         *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
   }
 }
 
diff --git a/src/arm/interface-descriptors-arm.cc b/src/arm/interface-descriptors-arm.cc
index 4e8c95c..fa0c040 100644
--- a/src/arm/interface-descriptors-arm.cc
+++ b/src/arm/interface-descriptors-arm.cc
@@ -13,6 +13,14 @@
 
 const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
 
+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
+    CallInterfaceDescriptorData* data, int register_parameter_count) {
+  const Register default_stub_registers[] = {r0, r1, r2, r3, r4};
+  CHECK_LE(static_cast<size_t>(register_parameter_count),
+           arraysize(default_stub_registers));
+  data->InitializePlatformSpecific(register_parameter_count,
+                                   default_stub_registers);
+}
 
 const Register LoadDescriptor::ReceiverRegister() { return r1; }
 const Register LoadDescriptor::NameRegister() { return r2; }
@@ -41,9 +49,6 @@
 const Register StoreTransitionDescriptor::MapRegister() { return r3; }
 
 
-const Register LoadGlobalViaContextDescriptor::SlotRegister() { return r2; }
-
-
 const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r2; }
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r0; }
 
@@ -65,8 +70,6 @@
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
 
-const Register HasPropertyDescriptor::ObjectRegister() { return r0; }
-const Register HasPropertyDescriptor::KeyRegister() { return r3; }
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -255,18 +258,17 @@
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
   // r0 -- number of arguments
   // r1 -- function
   // r2 -- allocation site with elements kind
-  Register registers[] = {r1, r2};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
+  Register registers[] = {r1, r2, r0};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // stack param count needs (constructor pointer, and single argument)
   Register registers[] = {r1, r2, r0};
@@ -274,24 +276,7 @@
 }
 
 
-void InternalArrayConstructorConstantArgCountDescriptor::
-    InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
-  // register state
-  // r0 -- number of arguments
-  // r1 -- constructor function
-  Register registers[] = {r1};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // stack param count needs (constructor pointer, and single argument)
-  Register registers[] = {r1, r0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastArrayPushDescriptor::InitializePlatformSpecific(
+void VarArgFunctionDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // stack param count needs (arg count)
   Register registers[] = {r0};
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index d723251..4feadb7 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -89,17 +89,6 @@
 }
 
 
-int MacroAssembler::CallSizeNotPredictableCodeSize(Isolate* isolate,
-                                                   Address target,
-                                                   RelocInfo::Mode rmode,
-                                                   Condition cond) {
-  Instr mov_instr = cond | MOV | LeaveCC;
-  Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
-  return kInstrSize +
-         mov_operand.instructions_required(NULL, mov_instr) * kInstrSize;
-}
-
-
 void MacroAssembler::Call(Address target,
                           RelocInfo::Mode rmode,
                           Condition cond,
@@ -131,12 +120,6 @@
   //  blx   ip
   //                      @ return address
 
-  // Statement positions are expected to be recorded when the target
-  // address is loaded. The mov method will automatically record
-  // positions when pc is the target, since this is not the case here
-  // we have to do it explicitly.
-  positions_recorder()->WriteRecordedPositions();
-
   mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
   blx(ip, cond);
 
@@ -173,6 +156,40 @@
   Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
 }
 
+void MacroAssembler::CallDeoptimizer(Address target) {
+  BlockConstPoolScope block_const_pool(this);
+
+  uintptr_t target_raw = reinterpret_cast<uintptr_t>(target);
+
+  // We use blx, like a call, but it does not return here. The link register is
+  // used by the deoptimizer to work out what called it.
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    CpuFeatureScope scope(this, ARMv7);
+    movw(ip, target_raw & 0xffff);
+    movt(ip, (target_raw >> 16) & 0xffff);
+    blx(ip);
+  } else {
+    // We need to load a literal, but we can't use the usual constant pool
+    // because we call this from a patcher, and cannot afford the guard
+    // instruction and other administrative overhead.
+    ldr(ip, MemOperand(pc, (2 * kInstrSize) - kPcLoadDelta));
+    blx(ip);
+    dd(target_raw);
+  }
+}
+
+int MacroAssembler::CallDeoptimizerSize() {
+  // ARMv7+:
+  //    movw    ip, ...
+  //    movt    ip, ...
+  //    blx     ip              @ This never returns.
+  //
+  // ARMv6:
+  //    ldr     ip, =address
+  //    blx     ip              @ This never returns.
+  //    .word   address
+  return 3 * kInstrSize;
+}
 
 void MacroAssembler::Ret(Condition cond) {
   bx(lr, cond);
@@ -245,6 +262,11 @@
   }
 }
 
+void MacroAssembler::Move(SwVfpRegister dst, SwVfpRegister src) {
+  if (!dst.is(src)) {
+    vmov(dst, src);
+  }
+}
 
 void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
   if (!dst.is(src)) {
@@ -252,11 +274,10 @@
   }
 }
 
-
 void MacroAssembler::Mls(Register dst, Register src1, Register src2,
                          Register srcA, Condition cond) {
-  if (CpuFeatures::IsSupported(MLS)) {
-    CpuFeatureScope scope(this, MLS);
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    CpuFeatureScope scope(this, ARMv7);
     mls(dst, src1, src2, srcA, cond);
   } else {
     DCHECK(!srcA.is(ip));
@@ -841,8 +862,7 @@
   // Number of d-regs not known at snapshot time.
   DCHECK(!serializer_enabled());
   // General purpose registers are pushed last on the stack.
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
   return MemOperand(sp, doubles_size + register_offset);
@@ -1237,9 +1257,8 @@
 
 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
   ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  ldr(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
-  ldr(vector,
-      FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+  ldr(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
+  ldr(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
 }
 
 
@@ -1553,12 +1572,13 @@
                                              const ParameterCount& expected,
                                              const ParameterCount& actual) {
   Label skip_flooding;
-  ExternalReference step_in_enabled =
-      ExternalReference::debug_step_in_enabled_address(isolate());
-  mov(r4, Operand(step_in_enabled));
-  ldrb(r4, MemOperand(r4));
-  cmp(r4, Operand(0));
-  b(eq, &skip_flooding);
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  mov(r4, Operand(last_step_action));
+  ldrsb(r4, MemOperand(r4));
+  cmp(r4, Operand(StepIn));
+  b(lt, &skip_flooding);
   {
     FrameScope frame(this,
                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -3870,8 +3890,7 @@
   if (reg5.is_valid()) regs |= reg5.bit();
   if (reg6.is_valid()) regs |= reg6.bit();
 
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
     int code = config->GetAllocatableGeneralCode(i);
     Register candidate = Register::from_code(code);
@@ -3969,6 +3988,10 @@
     Assembler::FlushICache(masm_.isolate(), address_, size_);
   }
 
+  // Check that we don't have any pending constant pools.
+  DCHECK(masm_.pending_32_bit_constants_.empty());
+  DCHECK(masm_.pending_64_bit_constants_.empty());
+
   // Check that the code was patched as expected.
   DCHECK(masm_.pc_ == address_ + size_);
   DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 8fa197c..16dcd47 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -101,10 +101,6 @@
   int CallStubSize(CodeStub* stub,
                    TypeFeedbackId ast_id = TypeFeedbackId::None(),
                    Condition cond = al);
-  static int CallSizeNotPredictableCodeSize(Isolate* isolate,
-                                            Address target,
-                                            RelocInfo::Mode rmode,
-                                            Condition cond = al);
 
   // Jump, Call, and Ret pseudo instructions implementing inter-working.
   void Jump(Register target, Condition cond = al);
@@ -114,17 +110,19 @@
   void Call(Address target, RelocInfo::Mode rmode,
             Condition cond = al,
             TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
+  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+            TypeFeedbackId ast_id = TypeFeedbackId::None(), Condition cond = al,
+            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
   int CallSize(Handle<Code> code,
                RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
                TypeFeedbackId ast_id = TypeFeedbackId::None(),
                Condition cond = al);
-  void Call(Handle<Code> code,
-            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
-            TypeFeedbackId ast_id = TypeFeedbackId::None(),
-            Condition cond = al,
-            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
   void Ret(Condition cond = al);
 
+  // Used for patching in calls to the deoptimizer.
+  void CallDeoptimizer(Address target);
+  static int CallDeoptimizerSize();
+
   // Emit code to discard a non-negative number of pointer-sized elements
   // from the stack, clobbering only the sp register.
   void Drop(int count, Condition cond = al);
@@ -172,6 +170,7 @@
       mov(dst, src, sbit, cond);
     }
   }
+  void Move(SwVfpRegister dst, SwVfpRegister src);
   void Move(DwVfpRegister dst, DwVfpRegister src);
 
   void Load(Register dst, const MemOperand& src, Representation r);
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 1a870c5..afe31db 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -299,8 +299,11 @@
           if (strcmp(arg1, "all") == 0) {
             for (int i = 0; i < kNumRegisters; i++) {
               value = GetRegisterValue(i);
-              PrintF("%3s: 0x%08x %10d", Register::from_code(i).ToString(),
-                     value, value);
+              PrintF(
+                  "%3s: 0x%08x %10d",
+                  RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+                      i),
+                  value, value);
               if ((argc == 3 && strcmp(arg2, "fp") == 0) &&
                   i < 8 &&
                   (i % 2) == 0) {
@@ -633,9 +636,7 @@
   last_debugger_input_ = input;
 }
 
-
-void Simulator::FlushICache(v8::internal::HashMap* i_cache,
-                            void* start_addr,
+void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
                             size_t size) {
   intptr_t start = reinterpret_cast<intptr_t>(start_addr);
   int intra_line = (start & CachePage::kLineMask);
@@ -656,10 +657,8 @@
   }
 }
 
-
-CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
-  v8::internal::HashMap::Entry* entry =
-      i_cache->LookupOrInsert(page, ICacheHash(page));
+CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
+  base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
   if (entry->value == NULL) {
     CachePage* new_page = new CachePage();
     entry->value = new_page;
@@ -669,9 +668,7 @@
 
 
 // Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
-                             intptr_t start,
-                             int size) {
+void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
   DCHECK(size <= CachePage::kPageSize);
   DCHECK(AllOnOnePage(start, size - 1));
   DCHECK((start & CachePage::kLineMask) == 0);
@@ -683,9 +680,7 @@
   memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
 }
 
-
-void Simulator::CheckICache(v8::internal::HashMap* i_cache,
-                            Instruction* instr) {
+void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
   intptr_t address = reinterpret_cast<intptr_t>(instr);
   void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
   void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -718,7 +713,7 @@
 Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
   i_cache_ = isolate_->simulator_i_cache();
   if (i_cache_ == NULL) {
-    i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+    i_cache_ = new base::HashMap(&ICacheMatch);
     isolate_->set_simulator_i_cache(i_cache_);
   }
   Initialize(isolate);
@@ -850,10 +845,10 @@
 
 
 // static
-void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
   Redirection::DeleteChain(first);
   if (i_cache != nullptr) {
-    for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
+    for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
          entry = i_cache->Next(entry)) {
       delete static_cast<CachePage*>(entry->value);
     }
@@ -1808,15 +1803,17 @@
           case ExternalReference::BUILTIN_FP_FP_CALL:
           case ExternalReference::BUILTIN_COMPARE_CALL:
             PrintF("Call to host function at %p with args %f, %f",
-                   FUNCTION_ADDR(generic_target), dval0, dval1);
+                   static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0,
+                   dval1);
             break;
           case ExternalReference::BUILTIN_FP_CALL:
             PrintF("Call to host function at %p with arg %f",
-                FUNCTION_ADDR(generic_target), dval0);
+                   static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0);
             break;
           case ExternalReference::BUILTIN_FP_INT_CALL:
             PrintF("Call to host function at %p with args %f, %d",
-                   FUNCTION_ADDR(generic_target), dval0, ival);
+                   static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0,
+                   ival);
             break;
           default:
             UNREACHABLE();
@@ -1942,7 +1939,8 @@
           PrintF(
               "Call to host triple returning runtime function %p "
               "args %08x, %08x, %08x, %08x, %08x",
-              FUNCTION_ADDR(target), arg1, arg2, arg3, arg4, arg5);
+              static_cast<void*>(FUNCTION_ADDR(target)), arg1, arg2, arg3, arg4,
+              arg5);
           if (!stack_aligned) {
             PrintF(" with unaligned stack %08x\n", get_register(sp));
           }
@@ -1953,7 +1951,8 @@
         // pass it to the target function.
         ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
         if (::v8::internal::FLAG_trace_sim) {
-          PrintF("Returned { %p, %p, %p }\n", result.x, result.y, result.z);
+          PrintF("Returned { %p, %p, %p }\n", static_cast<void*>(result.x),
+                 static_cast<void*>(result.y), static_cast<void*>(result.z));
         }
         // Return is passed back in address pointed to by hidden first argument.
         ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
@@ -1969,13 +1968,8 @@
           PrintF(
               "Call to host function at %p "
               "args %08x, %08x, %08x, %08x, %08x, %08x",
-              FUNCTION_ADDR(target),
-              arg0,
-              arg1,
-              arg2,
-              arg3,
-              arg4,
-              arg5);
+              static_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2, arg3,
+              arg4, arg5);
           if (!stack_aligned) {
             PrintF(" with unaligned stack %08x\n", get_register(sp));
           }
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index b3c8eb4..71b8e40 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -68,7 +68,7 @@
 
 #include "src/arm/constants-arm.h"
 #include "src/assembler.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
 
 namespace v8 {
 namespace internal {
@@ -200,7 +200,7 @@
   // Call on program start.
   static void Initialize(Isolate* isolate);
 
-  static void TearDown(HashMap* i_cache, Redirection* first);
+  static void TearDown(base::HashMap* i_cache, Redirection* first);
 
   // V8 generally calls into generated JS code with 5 parameters and into
   // generated RegExp code with 7 parameters. This is a convenience function,
@@ -222,8 +222,7 @@
   char* last_debugger_input() { return last_debugger_input_; }
 
   // ICache checking.
-  static void FlushICache(v8::internal::HashMap* i_cache, void* start,
-                          size_t size);
+  static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
 
   // Returns true if pc register contains one of the 'special_values' defined
   // below (bad_lr, end_sim_pc).
@@ -342,10 +341,9 @@
   void InstructionDecode(Instruction* instr);
 
   // ICache.
-  static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
-  static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
-                           int size);
-  static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+  static void CheckICache(base::HashMap* i_cache, Instruction* instr);
+  static void FlushOnePage(base::HashMap* i_cache, intptr_t start, int size);
+  static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
 
   // Runtime call support.
   static void* RedirectExternalReference(
@@ -405,7 +403,7 @@
   char* last_debugger_input_;
 
   // Icache simulation
-  v8::internal::HashMap* i_cache_;
+  base::HashMap* i_cache_;
 
   // Registered breakpoints.
   Instruction* break_pc_;
diff --git a/src/arm64/assembler-arm64.cc b/src/arm64/assembler-arm64.cc
index 91563a4..7fd5a79 100644
--- a/src/arm64/assembler-arm64.cc
+++ b/src/arm64/assembler-arm64.cc
@@ -189,37 +189,25 @@
   return Memory::uint32_at(Assembler::target_pointer_address_at(pc_));
 }
 
-void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
-    ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
-  if (IsWasmMemoryReference(rmode_) && old_base != new_base) {
-    Address updated_memory_reference;
-    DCHECK(old_base <= wasm_memory_reference() &&
-           wasm_memory_reference() < old_base + old_size);
-    updated_memory_reference = new_base + (wasm_memory_reference() - old_base);
-    DCHECK(new_base <= updated_memory_reference &&
-           updated_memory_reference < new_base + new_size);
-    Assembler::set_target_address_at(
-        isolate_, pc_, host_, updated_memory_reference, icache_flush_mode);
-  } else if (IsWasmMemorySizeReference(rmode_)) {
-    uint32_t updated_size_reference;
-    DCHECK(wasm_memory_size_reference() <= old_size);
-    updated_size_reference =
-        new_size + (wasm_memory_size_reference() - old_size);
-    DCHECK(updated_size_reference <= new_size);
-    Memory::uint32_at(Assembler::target_pointer_address_at(pc_)) =
-        updated_size_reference;
-  } else {
-    UNREACHABLE();
-  }
+Address RelocInfo::wasm_global_reference() {
+  DCHECK(IsWasmGlobalReference(rmode_));
+  return Memory::Address_at(Assembler::target_pointer_address_at(pc_));
+}
+
+void RelocInfo::unchecked_update_wasm_memory_reference(
+    Address address, ICacheFlushMode flush_mode) {
+  Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
+}
+
+void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
+                                                  ICacheFlushMode flush_mode) {
+  Memory::uint32_at(Assembler::target_pointer_address_at(pc_)) = size;
 }
 
 Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
                                               Register reg3, Register reg4) {
   CPURegList regs(reg1, reg2, reg3, reg4);
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
     int code = config->GetAllocatableDoubleCode(i);
     Register candidate = Register::from_code(code);
@@ -614,6 +602,8 @@
                          reloc_info_writer.pos());
     desc->origin = this;
     desc->constant_pool_size = 0;
+    desc->unwinding_info_size = 0;
+    desc->unwinding_info = nullptr;
   }
 }
 
@@ -982,14 +972,12 @@
 
 
 void Assembler::br(const Register& xn) {
-  positions_recorder()->WriteRecordedPositions();
   DCHECK(xn.Is64Bits());
   Emit(BR | Rn(xn));
 }
 
 
 void Assembler::blr(const Register& xn) {
-  positions_recorder()->WriteRecordedPositions();
   DCHECK(xn.Is64Bits());
   // The pattern 'blr xzr' is used as a guard to detect when execution falls
   // through the constant pool. It should not be emitted.
@@ -999,7 +987,6 @@
 
 
 void Assembler::ret(const Register& xn) {
-  positions_recorder()->WriteRecordedPositions();
   DCHECK(xn.Is64Bits());
   Emit(RET | Rn(xn));
 }
@@ -1011,7 +998,6 @@
 
 
 void Assembler::b(Label* label) {
-  positions_recorder()->WriteRecordedPositions();
   b(LinkAndGetInstructionOffsetTo(label));
 }
 
@@ -1022,47 +1008,40 @@
 
 
 void Assembler::b(Label* label, Condition cond) {
-  positions_recorder()->WriteRecordedPositions();
   b(LinkAndGetInstructionOffsetTo(label), cond);
 }
 
 
 void Assembler::bl(int imm26) {
-  positions_recorder()->WriteRecordedPositions();
   Emit(BL | ImmUncondBranch(imm26));
 }
 
 
 void Assembler::bl(Label* label) {
-  positions_recorder()->WriteRecordedPositions();
   bl(LinkAndGetInstructionOffsetTo(label));
 }
 
 
 void Assembler::cbz(const Register& rt,
                     int imm19) {
-  positions_recorder()->WriteRecordedPositions();
   Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
 }
 
 
 void Assembler::cbz(const Register& rt,
                     Label* label) {
-  positions_recorder()->WriteRecordedPositions();
   cbz(rt, LinkAndGetInstructionOffsetTo(label));
 }
 
 
 void Assembler::cbnz(const Register& rt,
                      int imm19) {
-  positions_recorder()->WriteRecordedPositions();
   Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
 }
 
 
 void Assembler::cbnz(const Register& rt,
                      Label* label) {
-  positions_recorder()->WriteRecordedPositions();
   cbnz(rt, LinkAndGetInstructionOffsetTo(label));
 }
 
@@ -1070,7 +1049,6 @@
 void Assembler::tbz(const Register& rt,
                     unsigned bit_pos,
                     int imm14) {
-  positions_recorder()->WriteRecordedPositions();
   DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
   Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
 }
@@ -1079,7 +1057,6 @@
 void Assembler::tbz(const Register& rt,
                     unsigned bit_pos,
                     Label* label) {
-  positions_recorder()->WriteRecordedPositions();
   tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
 }
 
@@ -1087,7 +1064,6 @@
 void Assembler::tbnz(const Register& rt,
                      unsigned bit_pos,
                      int imm14) {
-  positions_recorder()->WriteRecordedPositions();
   DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
   Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
 }
@@ -1096,7 +1072,6 @@
 void Assembler::tbnz(const Register& rt,
                      unsigned bit_pos,
                      Label* label) {
-  positions_recorder()->WriteRecordedPositions();
   tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
 }
 
@@ -1716,6 +1691,83 @@
   ldr_pcrel(rt, 0);
 }
 
+void Assembler::ldar(const Register& rt, const Register& rn) {
+  DCHECK(rn.Is64Bits());
+  LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAR_w : LDAR_x;
+  Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::ldaxr(const Register& rt, const Register& rn) {
+  DCHECK(rn.Is64Bits());
+  LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAXR_w : LDAXR_x;
+  Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::stlr(const Register& rt, const Register& rn) {
+  DCHECK(rn.Is64Bits());
+  LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLR_w : STLR_x;
+  Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::stlxr(const Register& rs, const Register& rt,
+                      const Register& rn) {
+  DCHECK(rs.Is32Bits());
+  DCHECK(rn.Is64Bits());
+  LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLXR_w : STLXR_x;
+  Emit(op | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::ldarb(const Register& rt, const Register& rn) {
+  DCHECK(rt.Is32Bits());
+  DCHECK(rn.Is64Bits());
+  Emit(LDAR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::ldaxrb(const Register& rt, const Register& rn) {
+  DCHECK(rt.Is32Bits());
+  DCHECK(rn.Is64Bits());
+  Emit(LDAXR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::stlrb(const Register& rt, const Register& rn) {
+  DCHECK(rt.Is32Bits());
+  DCHECK(rn.Is64Bits());
+  Emit(STLR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::stlxrb(const Register& rs, const Register& rt,
+                       const Register& rn) {
+  DCHECK(rs.Is32Bits());
+  DCHECK(rt.Is32Bits());
+  DCHECK(rn.Is64Bits());
+  Emit(STLXR_b | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::ldarh(const Register& rt, const Register& rn) {
+  DCHECK(rt.Is32Bits());
+  DCHECK(rn.Is64Bits());
+  Emit(LDAR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::ldaxrh(const Register& rt, const Register& rn) {
+  DCHECK(rt.Is32Bits());
+  DCHECK(rn.Is64Bits());
+  Emit(LDAXR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::stlrh(const Register& rt, const Register& rn) {
+  DCHECK(rt.Is32Bits());
+  DCHECK(rn.Is64Bits());
+  Emit(STLR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::stlxrh(const Register& rs, const Register& rt,
+                       const Register& rn) {
+  DCHECK(rs.Is32Bits());
+  DCHECK(rt.Is32Bits());
+  DCHECK(rn.Is64Bits());
+  Emit(STLXR_h | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
 
 void Assembler::mov(const Register& rd, const Register& rm) {
   // Moves involving the stack pointer are encoded as add immediate with
diff --git a/src/arm64/assembler-arm64.h b/src/arm64/assembler-arm64.h
index fac7a70..cc26278 100644
--- a/src/arm64/assembler-arm64.h
+++ b/src/arm64/assembler-arm64.h
@@ -154,8 +154,6 @@
     DCHECK(IsValidOrNone());
   }
 
-  const char* ToString();
-  bool IsAllocatable() const;
   bool IsValid() const {
     DCHECK(IsRegister() || IsNone());
     return IsValidRegister();
@@ -195,6 +193,7 @@
   // End of V8 compatibility section -----------------------
 };
 
+static const bool kSimpleFPAliasing = true;
 
 struct FPRegister : public CPURegister {
   enum Code {
@@ -230,8 +229,6 @@
     DCHECK(IsValidOrNone());
   }
 
-  const char* ToString();
-  bool IsAllocatable() const;
   bool IsValid() const {
     DCHECK(IsFPRegister() || IsNone());
     return IsValidFPRegister();
@@ -1401,6 +1398,42 @@
   // Load literal to register.
   void ldr(const CPURegister& rt, const Immediate& imm);
 
+  // Load-acquire word.
+  void ldar(const Register& rt, const Register& rn);
+
+  // Load-acquire exclusive word.
+  void ldaxr(const Register& rt, const Register& rn);
+
+  // Store-release word.
+  void stlr(const Register& rt, const Register& rn);
+
+  // Store-release exclusive word.
+  void stlxr(const Register& rs, const Register& rt, const Register& rn);
+
+  // Load-acquire byte.
+  void ldarb(const Register& rt, const Register& rn);
+
+  // Load-acquire exclusive byte.
+  void ldaxrb(const Register& rt, const Register& rn);
+
+  // Store-release byte.
+  void stlrb(const Register& rt, const Register& rn);
+
+  // Store-release exclusive byte.
+  void stlxrb(const Register& rs, const Register& rt, const Register& rn);
+
+  // Load-acquire half-word.
+  void ldarh(const Register& rt, const Register& rn);
+
+  // Load-acquire exclusive half-word.
+  void ldaxrh(const Register& rt, const Register& rn);
+
+  // Store-release half-word.
+  void stlrh(const Register& rt, const Register& rn);
+
+  // Store-release exclusive half-word.
+  void stlxrh(const Register& rs, const Register& rt, const Register& rn);
+
   // Move instructions. The default shift of -1 indicates that the move
   // instruction will calculate an appropriate 16-bit immediate and left shift
   // that is equal to the 64-bit immediate argument. If an explicit left shift
@@ -1695,6 +1728,11 @@
     return rt2.code() << Rt2_offset;
   }
 
+  static Instr Rs(CPURegister rs) {
+    DCHECK(rs.code() != kSPRegInternalCode);
+    return rs.code() << Rs_offset;
+  }
+
   // These encoding functions allow the stack pointer to be encoded, and
   // disallow the zero register.
   static Instr RdSP(Register rd) {
diff --git a/src/arm64/builtins-arm64.cc b/src/arm64/builtins-arm64.cc
index be372e6..e16897a 100644
--- a/src/arm64/builtins-arm64.cc
+++ b/src/arm64/builtins-arm64.cc
@@ -32,10 +32,7 @@
   __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
 }
 
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
-                                CFunctionId id,
-                                BuiltinExtraArguments extra_args) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
   // ----------- S t a t e -------------
   //  -- x0                 : number of arguments excluding receiver
   //  -- x1                 : target
@@ -54,23 +51,8 @@
   __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
 
   // Insert extra arguments.
-  int num_extra_args = 0;
-  switch (extra_args) {
-    case BuiltinExtraArguments::kTarget:
-      __ Push(x1);
-      ++num_extra_args;
-      break;
-    case BuiltinExtraArguments::kNewTarget:
-      __ Push(x3);
-      ++num_extra_args;
-      break;
-    case BuiltinExtraArguments::kTargetAndNewTarget:
-      __ Push(x1, x3);
-      num_extra_args += 2;
-      break;
-    case BuiltinExtraArguments::kNone:
-      break;
-  }
+  const int num_extra_args = 2;
+  __ Push(x1, x3);
 
   // JumpToExternalReference expects x0 to contain the number of arguments
   // including the receiver and the extra arguments.
@@ -141,6 +123,8 @@
 void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
   // ----------- S t a t e -------------
   //  -- x0                 : number of arguments
+  //  -- x1                 : function
+  //  -- cp                 : context
   //  -- lr                 : return address
   //  -- sp[(argc - n) * 8] : arg[n] (zero-based)
   //  -- sp[(argc + 1) * 8] : receiver
@@ -152,9 +136,9 @@
                                      : Heap::kMinusInfinityValueRootIndex;
 
   // Load the accumulator with the default return value (either -Infinity or
-  // +Infinity), with the tagged value in x1 and the double value in d1.
-  __ LoadRoot(x1, root_index);
-  __ Ldr(d1, FieldMemOperand(x1, HeapNumber::kValueOffset));
+  // +Infinity), with the tagged value in x5 and the double value in d5.
+  __ LoadRoot(x5, root_index);
+  __ Ldr(d5, FieldMemOperand(x5, HeapNumber::kValueOffset));
 
   // Remember how many slots to drop (including the receiver).
   __ Add(x4, x0, 1);
@@ -170,31 +154,34 @@
     __ Peek(x2, Operand(x0, LSL, kPointerSizeLog2));
 
     // Load the double value of the parameter into d2, maybe converting the
-    // parameter to a number first using the ToNumberStub if necessary.
+    // parameter to a number first using the ToNumber builtin if necessary.
     Label convert_smi, convert_number, done_convert;
     __ JumpIfSmi(x2, &convert_smi);
     __ JumpIfHeapNumber(x2, &convert_number);
     {
-      // Parameter is not a Number, use the ToNumberStub to convert it.
-      FrameScope scope(masm, StackFrame::INTERNAL);
+      // Parameter is not a Number, use the ToNumber builtin to convert it.
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ Push(lr, fp);
+      __ Move(fp, jssp);
+      __ Push(cp, x1);
       __ SmiTag(x0);
       __ SmiTag(x4);
-      __ Push(x0, x1, x4);
+      __ Push(x0, x5, x4);
       __ Mov(x0, x2);
-      ToNumberStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
       __ Mov(x2, x0);
-      __ Pop(x4, x1, x0);
+      __ Pop(x4, x5, x0);
       {
-        // Restore the double accumulator value (d1).
+        // Restore the double accumulator value (d5).
         Label done_restore;
-        __ SmiUntagToDouble(d1, x1, kSpeculativeUntag);
-        __ JumpIfSmi(x1, &done_restore);
-        __ Ldr(d1, FieldMemOperand(x1, HeapNumber::kValueOffset));
+        __ SmiUntagToDouble(d5, x5, kSpeculativeUntag);
+        __ JumpIfSmi(x5, &done_restore);
+        __ Ldr(d5, FieldMemOperand(x5, HeapNumber::kValueOffset));
         __ Bind(&done_restore);
       }
       __ SmiUntag(x4);
       __ SmiUntag(x0);
+      __ Pop(x1, cp, fp, lr);
     }
     __ AssertNumber(x2);
     __ JumpIfSmi(x2, &convert_smi);
@@ -209,22 +196,22 @@
 
     // We can use a single fmin/fmax for the operation itself, but we then need
     // to work out which HeapNumber (or smi) the result came from.
-    __ Fmov(x11, d1);
+    __ Fmov(x11, d5);
     if (kind == MathMaxMinKind::kMin) {
-      __ Fmin(d1, d1, d2);
+      __ Fmin(d5, d5, d2);
     } else {
       DCHECK(kind == MathMaxMinKind::kMax);
-      __ Fmax(d1, d1, d2);
+      __ Fmax(d5, d5, d2);
     }
-    __ Fmov(x10, d1);
+    __ Fmov(x10, d5);
     __ Cmp(x10, x11);
-    __ Csel(x1, x1, x2, eq);
+    __ Csel(x5, x5, x2, eq);
     __ B(&loop);
   }
 
   __ Bind(&done_loop);
-  __ Mov(x0, x1);
   __ Drop(x4);
+  __ Mov(x0, x5);
   __ Ret();
 }
 
@@ -250,8 +237,7 @@
   }
 
   // 2a. Convert first argument to number.
-  ToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
+  __ Jump(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
 
   // 2b. No arguments, return +0 (already in x0).
   __ Bind(&no_arguments);
@@ -299,8 +285,7 @@
       FrameScope scope(masm, StackFrame::INTERNAL);
       __ Push(x1, x3);
       __ Move(x0, x2);
-      ToNumberStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
       __ Move(x2, x0);
       __ Pop(x3, x1);
     }
@@ -715,8 +700,8 @@
   __ AssertGeneratorObject(x1);
 
   // Store input value into generator object.
-  __ Str(x0, FieldMemOperand(x1, JSGeneratorObject::kInputOffset));
-  __ RecordWriteField(x1, JSGeneratorObject::kInputOffset, x0, x3,
+  __ Str(x0, FieldMemOperand(x1, JSGeneratorObject::kInputOrDebugPosOffset));
+  __ RecordWriteField(x1, JSGeneratorObject::kInputOrDebugPosOffset, x0, x3,
                       kLRHasNotBeenSaved, kDontSaveFPRegs);
 
   // Store resume mode into generator object.
@@ -727,20 +712,23 @@
   __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
 
   // Flood function if we are stepping.
-  Label skip_flooding;
-  ExternalReference step_in_enabled =
-      ExternalReference::debug_step_in_enabled_address(masm->isolate());
-  __ Mov(x10, Operand(step_in_enabled));
-  __ Ldrb(x10, MemOperand(x10));
-  __ CompareAndBranch(x10, Operand(0), eq, &skip_flooding);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Push(x1, x2, x4);
-    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
-    __ Pop(x2, x1);
-    __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
-  }
-  __ bind(&skip_flooding);
+  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+  Label stepping_prepared;
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(masm->isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  __ Mov(x10, Operand(last_step_action));
+  __ Ldrsb(x10, MemOperand(x10));
+  __ CompareAndBranch(x10, Operand(StepIn), ge, &prepare_step_in_if_stepping);
+
+  // Flood function if we need to continue stepping in the suspended generator.
+  ExternalReference debug_suspended_generator =
+      ExternalReference::debug_suspended_generator_address(masm->isolate());
+  __ Mov(x10, Operand(debug_suspended_generator));
+  __ Ldr(x10, MemOperand(x10));
+  __ CompareAndBranch(x10, Operand(x1), eq,
+                      &prepare_step_in_suspended_generator);
+  __ Bind(&stepping_prepared);
 
   // Push receiver.
   __ Ldr(x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
@@ -775,9 +763,8 @@
   // New-style (ignition/turbofan) generator object
   {
     __ Ldr(x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
-    __ Ldr(x0,
+    __ Ldr(w0,
          FieldMemOperand(x0, SharedFunctionInfo::kFormalParameterCountOffset));
-    __ SmiUntag(x0);
     // We abuse new.target both to indicate that this is a resume call and to
     // pass in the generator object.  In ordinary calls, new.target is always
     // undefined because generator functions are non-constructable.
@@ -829,6 +816,26 @@
     __ Move(x0, x1);  // Continuation expects generator object in x0.
     __ Br(x10);
   }
+
+  __ Bind(&prepare_step_in_if_stepping);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(x1, x2, x4);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(x2, x1);
+    __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+  }
+  __ B(&stepping_prepared);
+
+  __ Bind(&prepare_step_in_suspended_generator);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(x1, x2);
+    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+    __ Pop(x2, x1);
+    __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+  }
+  __ B(&stepping_prepared);
 }
 
 enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
@@ -963,6 +970,22 @@
   Generate_JSEntryTrampolineHelper(masm, true);
 }
 
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
+  Register args_count = scratch;
+
+  // Get the arguments + receiver count.
+  __ ldr(args_count,
+         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ Ldr(args_count.W(),
+         FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+  // Drop receiver + arguments.
+  __ Drop(args_count, 1);
+}
+
 // Generate code for entering a JS function with the interpreter.
 // On entry to the function the receiver and arguments have been pushed on the
 // stack left to right.  The actual argument count matches the formal parameter
@@ -1064,16 +1087,7 @@
   masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
 
   // The return value is in x0.
-
-  // Get the arguments + reciever count.
-  __ ldr(x1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ Ldr(w1, FieldMemOperand(x1, BytecodeArray::kParameterSizeOffset));
-
-  // Leave the frame (also dropping the register file).
-  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
-
-  // Drop receiver + arguments and return.
-  __ Drop(x1, 1);
+  LeaveInterpreterFrame(masm, x2);
   __ Ret();
 
   // Load debug copy of the bytecode array.
@@ -1095,6 +1109,31 @@
   __ Jump(x7);
 }
 
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+  // Save the function and context for call to CompileBaseline.
+  __ ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+  __ ldr(kContextRegister,
+         MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  // Leave the frame before recompiling for baseline so that we don't count as
+  // an activation on the stack.
+  LeaveInterpreterFrame(masm, x2);
+
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Push return value.
+    __ push(x0);
+
+    // Push function as argument and compile for baseline.
+    __ push(x1);
+    __ CallRuntime(Runtime::kCompileBaseline);
+
+    // Restore return value.
+    __ pop(x0);
+  }
+  __ Ret();
+}
+
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode) {
@@ -1255,13 +1294,31 @@
   const int bailout_id = BailoutId::None().ToInt();
   __ Cmp(temp, Operand(Smi::FromInt(bailout_id)));
   __ B(ne, &loop_bottom);
+
   // Literals available?
+  Label got_literals, maybe_cleared_weakcell;
+  Register temp2 = x7;
   __ Ldr(temp, FieldMemOperand(array_pointer,
                                SharedFunctionInfo::kOffsetToPreviousLiterals));
+  // temp contains either a WeakCell pointing to the literals array or the
+  // literals array directly.
+  STATIC_ASSERT(WeakCell::kValueOffset == FixedArray::kLengthOffset);
+  __ Ldr(temp2, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp2, &maybe_cleared_weakcell);
+  // temp2 is a pointer, therefore temp is a WeakCell pointing to a literals
+  // array.
   __ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
-  __ JumpIfSmi(temp, &gotta_call_runtime);
+  __ jmp(&got_literals);
+
+  // temp2 is a smi. If it's 0, then we are looking at a cleared WeakCell
+  // around the literals array, and we should visit the runtime. If it's > 0,
+  // then temp already contains the literals array.
+  __ bind(&maybe_cleared_weakcell);
+  __ Cmp(temp2, Operand(Smi::FromInt(0)));
+  __ B(eq, &gotta_call_runtime);
 
   // Save the literals in the closure.
+  __ bind(&got_literals);
   __ Str(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset));
   __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, x7,
                       kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
@@ -1671,6 +1728,9 @@
 void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
                                                int field_index) {
   // ----------- S t a t e -------------
+  //  -- x0      : number of arguments
+  //  -- x1      : function
+  //  -- cp      : context
   //  -- lr      : return address
   //  -- jssp[0] : receiver
   // -----------------------------------
@@ -1681,7 +1741,7 @@
   {
     __ Pop(x0);
     __ JumpIfSmi(x0, &receiver_not_date);
-    __ JumpIfNotObjectType(x0, x1, x2, JS_DATE_TYPE, &receiver_not_date);
+    __ JumpIfNotObjectType(x0, x2, x3, JS_DATE_TYPE, &receiver_not_date);
   }
 
   // 2. Load the specified date field, falling back to the runtime as necessary.
@@ -1709,7 +1769,14 @@
 
   // 3. Raise a TypeError if the receiver is not a date.
   __ Bind(&receiver_not_date);
-  __ TailCallRuntime(Runtime::kThrowNotDateError);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Push(x0, lr, fp);
+    __ Move(fp, jssp);
+    __ Push(cp, x1);
+    __ Push(Smi::FromInt(0));
+    __ CallRuntime(Runtime::kThrowNotDateError);
+  }
 }
 
 // static
@@ -2711,6 +2778,82 @@
   __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
 }
 
+// static
+void Builtins::Generate_StringToNumber(MacroAssembler* masm) {
+  // The StringToNumber stub takes one argument in x0.
+  __ AssertString(x0);
+
+  // Check if string has a cached array index.
+  Label runtime;
+  __ Ldr(x2, FieldMemOperand(x0, String::kHashFieldOffset));
+  __ Tst(x2, Operand(String::kContainsCachedArrayIndexMask));
+  __ B(ne, &runtime);
+  __ IndexFromHash(x2, x0);
+  __ Ret();
+
+  __ Bind(&runtime);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    // Push argument.
+    __ Push(x0);
+    // We cannot use a tail call here because this builtin can also be called
+    // from wasm.
+    __ CallRuntime(Runtime::kStringToNumber);
+  }
+  __ Ret();
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in x0.
+  Label not_smi;
+  __ JumpIfNotSmi(x0, &not_smi);
+  __ Ret();
+  __ Bind(&not_smi);
+
+  Label not_heap_number;
+  __ CompareObjectType(x0, x1, x1, HEAP_NUMBER_TYPE);
+  // x0: receiver
+  // x1: receiver instance type
+  __ B(ne, &not_heap_number);
+  __ Ret();
+  __ Bind(&not_heap_number);
+
+  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+          RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_NonNumberToNumber(MacroAssembler* masm) {
+  // The NonNumberToNumber stub takes one argument in x0.
+  __ AssertNotNumber(x0);
+
+  Label not_string;
+  __ CompareObjectType(x0, x1, x1, FIRST_NONSTRING_TYPE);
+  // x0: receiver
+  // x1: receiver instance type
+  __ B(hs, &not_string);
+  __ Jump(masm->isolate()->builtins()->StringToNumber(),
+          RelocInfo::CODE_TARGET);
+  __ Bind(&not_string);
+
+  Label not_oddball;
+  __ Cmp(x1, ODDBALL_TYPE);
+  __ B(ne, &not_oddball);
+  __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset));
+  __ Ret();
+  __ Bind(&not_oddball);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    // Push argument.
+    __ Push(x0);
+    // We cannot use a tail call here because this builtin can also be called
+    // from wasm.
+    __ CallRuntime(Runtime::kToNumber);
+  }
+  __ Ret();
+}
+
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
   // ----------- S t a t e -------------
diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index a96b3df..6b03068 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -22,75 +22,28 @@
 namespace v8 {
 namespace internal {
 
+#define __ ACCESS_MASM(masm)
 
-static void InitializeArrayConstructorDescriptor(
-    Isolate* isolate, CodeStubDescriptor* descriptor,
-    int constant_stack_parameter_count) {
-  // cp: context
-  // x1: function
-  // x2: allocation site with elements kind
-  // x0: number of arguments to the constructor function
-  Address deopt_handler = Runtime::FunctionForId(
-      Runtime::kArrayConstructor)->entry;
-
-  if (constant_stack_parameter_count == 0) {
-    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  } else {
-    descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  }
+void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
+  __ Mov(x5, Operand(x0, LSL, kPointerSizeLog2));
+  __ Str(x1, MemOperand(jssp, x5));
+  __ Push(x1);
+  __ Push(x2);
+  __ Add(x0, x0, Operand(3));
+  __ TailCallRuntime(Runtime::kNewArray);
 }
 
-
-void ArraySingleArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
-static void InitializeInternalArrayConstructorDescriptor(
-    Isolate* isolate, CodeStubDescriptor* descriptor,
-    int constant_stack_parameter_count) {
-  Address deopt_handler = Runtime::FunctionForId(
-      Runtime::kInternalArrayConstructor)->entry;
-
-  if (constant_stack_parameter_count == 0) {
-    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  } else {
-    descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  }
-}
-
-
 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
   descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
 }
 
-void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+void FastFunctionBindStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
+  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
+  descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
 }
 
-
-void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-
 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                                ExternalReference miss) {
   // Update the static counter each time a new code stub is generated.
@@ -986,7 +939,7 @@
   CEntryStub::GenerateAheadOfTime(isolate);
   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
-  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
   CreateWeakCellStub::GenerateAheadOfTime(isolate);
   BinaryOpICStub::GenerateAheadOfTime(isolate);
@@ -1490,7 +1443,6 @@
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
                                           &miss,  // When index out of range.
-                                          STRING_INDEX_IS_ARRAY_INDEX,
                                           RECEIVER_IS_STRING);
   char_at_generator.GenerateFast(masm);
   __ Ret();
@@ -2022,6 +1974,7 @@
   //  feedback_vector : the feedback vector
   //  index :           slot in feedback vector (smi)
   Label initialize, done, miss, megamorphic, not_array_function;
+  Label done_initialize_count, done_increment_count;
 
   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
@@ -2044,7 +1997,7 @@
   Label check_allocation_site;
   __ Ldr(feedback_value, FieldMemOperand(feedback, WeakCell::kValueOffset));
   __ Cmp(function, feedback_value);
-  __ B(eq, &done);
+  __ B(eq, &done_increment_count);
   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
   __ B(eq, &done);
   __ Ldr(feedback_map, FieldMemOperand(feedback, HeapObject::kMapOffset));
@@ -2066,7 +2019,7 @@
   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
   __ Cmp(function, scratch1);
   __ B(ne, &megamorphic);
-  __ B(&done);
+  __ B(&done_increment_count);
 
   __ Bind(&miss);
 
@@ -2097,12 +2050,32 @@
   CreateAllocationSiteStub create_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &create_stub, argc, function,
                              feedback_vector, index, new_target);
-  __ B(&done);
+  __ B(&done_initialize_count);
 
   __ Bind(&not_array_function);
   CreateWeakCellStub weak_cell_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &weak_cell_stub, argc, function,
                              feedback_vector, index, new_target);
+
+  __ bind(&done_initialize_count);
+  // Initialize the call counter.
+  __ Mov(scratch1, Operand(Smi::FromInt(1)));
+  __ Adds(scratch2, feedback_vector,
+          Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+  __ Str(scratch1,
+         FieldMemOperand(scratch2, FixedArray::kHeaderSize + kPointerSize));
+  __ b(&done);
+
+  __ bind(&done_increment_count);
+
+  // Increment the call count for monomorphic function calls.
+  __ Add(scratch1, feedback_vector,
+         Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+  __ Add(scratch1, scratch1, Operand(FixedArray::kHeaderSize + kPointerSize));
+  __ Ldr(scratch2, FieldMemOperand(scratch1, 0));
+  __ Add(scratch2, scratch2, Operand(Smi::FromInt(1)));
+  __ Str(scratch2, FieldMemOperand(scratch1, 0));
+
   __ Bind(&done);
 }
 
@@ -2175,7 +2148,7 @@
   __ Add(feedback_vector, feedback_vector,
          Operand(FixedArray::kHeaderSize + kPointerSize));
   __ Ldr(index, FieldMemOperand(feedback_vector, 0));
-  __ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+  __ Add(index, index, Operand(Smi::FromInt(1)));
   __ Str(index, FieldMemOperand(feedback_vector, 0));
 
   // Set up arguments for the array constructor stub.
@@ -2235,7 +2208,7 @@
   __ Add(feedback_vector, feedback_vector,
          Operand(FixedArray::kHeaderSize + kPointerSize));
   __ Ldr(index, FieldMemOperand(feedback_vector, 0));
-  __ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+  __ Add(index, index, Operand(Smi::FromInt(1)));
   __ Str(index, FieldMemOperand(feedback_vector, 0));
 
   __ Bind(&call_function);
@@ -2300,7 +2273,7 @@
   __ B(ne, &miss);
 
   // Initialize the call counter.
-  __ Mov(x5, Smi::FromInt(CallICNexus::kCallCountIncrement));
+  __ Mov(x5, Smi::FromInt(1));
   __ Adds(x4, feedback_vector,
           Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize + kPointerSize));
@@ -2394,13 +2367,7 @@
     // Save object_ on the stack and pass index_ as argument for runtime call.
     __ Push(object_, index_);
   }
-  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
-    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
-  } else {
-    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
-    // NumberToSmi discards numbers that are not exact integers.
-    __ CallRuntime(Runtime::kNumberToSmi);
-  }
+  __ CallRuntime(Runtime::kNumberToSmi);
   // Save the conversion result before the pop instructions below
   // have a chance to overwrite it.
   __ Mov(index_, x0);
@@ -3086,74 +3053,13 @@
   __ SmiTag(from);
   StringCharAtGenerator generator(input_string, from, result_length, x0,
                                   &runtime, &runtime, &runtime,
-                                  STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
+                                  RECEIVER_IS_STRING);
   generator.GenerateFast(masm);
   __ Drop(3);
   __ Ret();
   generator.SkipSlow(masm, &runtime);
 }
 
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in x0.
-  Label not_smi;
-  __ JumpIfNotSmi(x0, &not_smi);
-  __ Ret();
-  __ Bind(&not_smi);
-
-  Label not_heap_number;
-  __ CompareObjectType(x0, x1, x1, HEAP_NUMBER_TYPE);
-  // x0: receiver
-  // x1: receiver instance type
-  __ B(ne, &not_heap_number);
-  __ Ret();
-  __ Bind(&not_heap_number);
-
-  NonNumberToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-}
-
-void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
-  // The NonNumberToNumber stub takes one argument in x0.
-  __ AssertNotNumber(x0);
-
-  Label not_string;
-  __ CompareObjectType(x0, x1, x1, FIRST_NONSTRING_TYPE);
-  // x0: receiver
-  // x1: receiver instance type
-  __ B(hs, &not_string);
-  StringToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-  __ Bind(&not_string);
-
-  Label not_oddball;
-  __ Cmp(x1, ODDBALL_TYPE);
-  __ B(ne, &not_oddball);
-  __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset));
-  __ Ret();
-  __ Bind(&not_oddball);
-
-  __ Push(x0);  // Push argument.
-  __ TailCallRuntime(Runtime::kToNumber);
-}
-
-void StringToNumberStub::Generate(MacroAssembler* masm) {
-  // The StringToNumber stub takes one argument in x0.
-  __ AssertString(x0);
-
-  // Check if string has a cached array index.
-  Label runtime;
-  __ Ldr(x2, FieldMemOperand(x0, String::kHashFieldOffset));
-  __ Tst(x2, Operand(String::kContainsCachedArrayIndexMask));
-  __ B(ne, &runtime);
-  __ IndexFromHash(x2, x0);
-  __ Ret();
-
-  __ Bind(&runtime);
-  __ Push(x0);  // Push argument.
-  __ TailCallRuntime(Runtime::kStringToNumber);
-}
-
 void ToStringStub::Generate(MacroAssembler* masm) {
   // The ToString stub takes one argument in x0.
   Label is_number;
@@ -3536,14 +3442,14 @@
 
 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  LoadICStub stub(isolate(), state());
+  LoadICStub stub(isolate());
   stub.GenerateForTrampoline(masm);
 }
 
 
 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  KeyedLoadICStub stub(isolate(), state());
+  KeyedLoadICStub stub(isolate());
   stub.GenerateForTrampoline(masm);
 }
 
@@ -4443,19 +4349,13 @@
   }
 }
 
-
-void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
       isolate);
   ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
       isolate);
-  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
-      isolate);
-}
-
-
-void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
-    Isolate* isolate) {
+  ArrayNArgumentsConstructorStub stub(isolate);
+  stub.GetCode();
   ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
   for (int i = 0; i < 2; i++) {
     // For internal arrays we only need a few things
@@ -4463,8 +4363,6 @@
     stubh1.GetCode();
     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
     stubh2.GetCode();
-    InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
-    stubh3.GetCode();
   }
 }
 
@@ -4488,14 +4386,15 @@
 
     __ Bind(&n_case);
     // N arguments.
-    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
-
+    ArrayNArgumentsConstructorStub stub(masm->isolate());
+    __ TailCallStub(&stub);
   } else if (argument_count() == NONE) {
     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   } else if (argument_count() == ONE) {
     CreateArrayDispatchOneArgument(masm, mode);
   } else if (argument_count() == MORE_THAN_ONE) {
-    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+    ArrayNArgumentsConstructorStub stub(masm->isolate());
+    __ TailCallStub(&stub);
   } else {
     UNREACHABLE();
   }
@@ -4610,7 +4509,7 @@
 
   __ Bind(&n_case);
   // N arguments.
-  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+  ArrayNArgumentsConstructorStub stubN(isolate());
   __ TailCallStub(&stubN);
 }
 
@@ -4823,10 +4722,10 @@
   Label rest_parameters;
   __ Ldrsw(x0, UntagSmiMemOperand(
                    x2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
   __ Ldrsw(
-      x1, FieldMemOperand(x1, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ Subs(x0, x0, x1);
+      x3, FieldMemOperand(x3, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ Subs(x0, x0, x3);
   __ B(gt, &rest_parameters);
 
   // Return an empty rest parameter array.
@@ -4872,15 +4771,16 @@
     // ----------- S t a t e -------------
     //  -- cp : context
     //  -- x0 : number of rest parameters
+    //  -- x1 : function
     //  -- x2 : pointer to first rest parameters
     //  -- lr : return address
     // -----------------------------------
 
     // Allocate space for the rest parameter array plus the backing store.
     Label allocate, done_allocate;
-    __ Mov(x1, JSArray::kSize + FixedArray::kHeaderSize);
-    __ Add(x1, x1, Operand(x0, LSL, kPointerSizeLog2));
-    __ Allocate(x1, x3, x4, x5, &allocate, NO_ALLOCATION_FLAGS);
+    __ Mov(x6, JSArray::kSize + FixedArray::kHeaderSize);
+    __ Add(x6, x6, Operand(x0, LSL, kPointerSizeLog2));
+    __ Allocate(x6, x3, x4, x5, &allocate, NO_ALLOCATION_FLAGS);
     __ Bind(&done_allocate);
 
     // Compute arguments.length in x6.
@@ -4915,19 +4815,27 @@
     STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
     __ Ret();
 
-    // Fall back to %AllocateInNewSpace.
+    // Fall back to %AllocateInNewSpace (if not too big).
+    Label too_big_for_new_space;
     __ Bind(&allocate);
+    __ Cmp(x6, Operand(Page::kMaxRegularHeapObjectSize));
+    __ B(gt, &too_big_for_new_space);
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
       __ SmiTag(x0);
-      __ SmiTag(x1);
-      __ Push(x0, x2, x1);
+      __ SmiTag(x6);
+      __ Push(x0, x2, x6);
       __ CallRuntime(Runtime::kAllocateInNewSpace);
       __ Mov(x3, x0);
       __ Pop(x2, x0);
       __ SmiUntag(x0);
     }
     __ B(&done_allocate);
+
+    // Fall back to %NewRestParameter.
+    __ Bind(&too_big_for_new_space);
+    __ Push(x1);
+    __ TailCallRuntime(Runtime::kNewRestParameter);
   }
 }
 
@@ -5264,9 +5172,9 @@
   __ Cmp(x4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ B(eq, &arguments_adaptor);
   {
-    __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+    __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
     __ Ldrsw(x0, FieldMemOperand(
-                     x1, SharedFunctionInfo::kFormalParameterCountOffset));
+                     x4, SharedFunctionInfo::kFormalParameterCountOffset));
     __ Add(x2, x2, Operand(x0, LSL, kPointerSizeLog2));
     __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
   }
@@ -5283,15 +5191,16 @@
   // ----------- S t a t e -------------
   //  -- cp : context
   //  -- x0 : number of rest parameters
+  //  -- x1 : function
   //  -- x2 : pointer to first rest parameters
   //  -- lr : return address
   // -----------------------------------
 
   // Allocate space for the strict arguments object plus the backing store.
   Label allocate, done_allocate;
-  __ Mov(x1, JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize);
-  __ Add(x1, x1, Operand(x0, LSL, kPointerSizeLog2));
-  __ Allocate(x1, x3, x4, x5, &allocate, NO_ALLOCATION_FLAGS);
+  __ Mov(x6, JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize);
+  __ Add(x6, x6, Operand(x0, LSL, kPointerSizeLog2));
+  __ Allocate(x6, x3, x4, x5, &allocate, NO_ALLOCATION_FLAGS);
   __ Bind(&done_allocate);
 
   // Compute arguments.length in x6.
@@ -5326,48 +5235,27 @@
   STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
   __ Ret();
 
-  // Fall back to %AllocateInNewSpace.
+  // Fall back to %AllocateInNewSpace (if not too big).
+  Label too_big_for_new_space;
   __ Bind(&allocate);
+  __ Cmp(x6, Operand(Page::kMaxRegularHeapObjectSize));
+  __ B(gt, &too_big_for_new_space);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ SmiTag(x0);
-    __ SmiTag(x1);
-    __ Push(x0, x2, x1);
+    __ SmiTag(x6);
+    __ Push(x0, x2, x6);
     __ CallRuntime(Runtime::kAllocateInNewSpace);
     __ Mov(x3, x0);
     __ Pop(x2, x0);
     __ SmiUntag(x0);
   }
   __ B(&done_allocate);
-}
 
-
-void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
-  Register context = cp;
-  Register result = x0;
-  Register slot = x2;
-  Label slow_case;
-
-  // Go up the context chain to the script context.
-  for (int i = 0; i < depth(); ++i) {
-    __ Ldr(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
-    context = result;
-  }
-
-  // Load the PropertyCell value at the specified slot.
-  __ Add(result, context, Operand(slot, LSL, kPointerSizeLog2));
-  __ Ldr(result, ContextMemOperand(result));
-  __ Ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
-
-  // If the result is not the_hole, return. Otherwise, handle in the runtime.
-  __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &slow_case);
-  __ Ret();
-
-  // Fallback to runtime.
-  __ Bind(&slow_case);
-  __ SmiTag(slot);
-  __ Push(slot);
-  __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+  // Fall back to %NewStrictArguments.
+  __ Bind(&too_big_for_new_space);
+  __ Push(x1);
+  __ TailCallRuntime(Runtime::kNewStrictArguments);
 }
 
 
diff --git a/src/arm64/codegen-arm64.cc b/src/arm64/codegen-arm64.cc
index 990dd41..edd2899 100644
--- a/src/arm64/codegen-arm64.cc
+++ b/src/arm64/codegen-arm64.cc
@@ -15,66 +15,6 @@
 
 #define __ ACCESS_MASM(masm)
 
-#if defined(USE_SIMULATOR)
-byte* fast_exp_arm64_machine_code = nullptr;
-double fast_exp_simulator(double x, Isolate* isolate) {
-  Simulator * simulator = Simulator::current(isolate);
-  Simulator::CallArgument args[] = {
-      Simulator::CallArgument(x),
-      Simulator::CallArgument::End()
-  };
-  return simulator->CallDouble(fast_exp_arm64_machine_code, args);
-}
-#endif
-
-
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
-  // Use the Math.exp implemetation in MathExpGenerator::EmitMathExp() to create
-  // an AAPCS64-compliant exp() function. This will be faster than the C
-  // library's exp() function, but probably less accurate.
-  size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
-  if (buffer == nullptr) return nullptr;
-
-  ExternalReference::InitializeMathExpData();
-  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
-                      CodeObjectRequired::kNo);
-  masm.SetStackPointer(csp);
-
-  // The argument will be in d0 on entry.
-  DoubleRegister input = d0;
-  // Use other caller-saved registers for all other values.
-  DoubleRegister result = d1;
-  DoubleRegister double_temp1 = d2;
-  DoubleRegister double_temp2 = d3;
-  Register temp1 = x10;
-  Register temp2 = x11;
-  Register temp3 = x12;
-
-  MathExpGenerator::EmitMathExp(&masm, input, result,
-                                double_temp1, double_temp2,
-                                temp1, temp2, temp3);
-  // Move the result to the return register.
-  masm.Fmov(d0, result);
-  masm.Ret();
-
-  CodeDesc desc;
-  masm.GetCode(&desc);
-  DCHECK(!RelocInfo::RequiresRelocation(desc));
-
-  Assembler::FlushICache(isolate, buffer, actual_size);
-  base::OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
-  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
-#else
-  fast_exp_arm64_machine_code = buffer;
-  return &fast_exp_simulator;
-#endif
-}
-
-
 UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
   return nullptr;
 }
@@ -510,127 +450,6 @@
   __ Bind(&done);
 }
 
-
-static MemOperand ExpConstant(Register base, int index) {
-  return MemOperand(base, index * kDoubleSize);
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
-                                   DoubleRegister input,
-                                   DoubleRegister result,
-                                   DoubleRegister double_temp1,
-                                   DoubleRegister double_temp2,
-                                   Register temp1,
-                                   Register temp2,
-                                   Register temp3) {
-  // TODO(jbramley): There are several instances where fnmsub could be used
-  // instead of fmul and fsub. Doing this changes the result, but since this is
-  // an estimation anyway, does it matter?
-
-  DCHECK(!AreAliased(input, result,
-                     double_temp1, double_temp2,
-                     temp1, temp2, temp3));
-  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
-  DCHECK(!masm->serializer_enabled());  // External references not serializable.
-
-  Label done;
-  DoubleRegister double_temp3 = result;
-  Register constants = temp3;
-
-  // The algorithm used relies on some magic constants which are initialized in
-  // ExternalReference::InitializeMathExpData().
-
-  // Load the address of the start of the array.
-  __ Mov(constants, ExternalReference::math_exp_constants(0));
-
-  // We have to do a four-way split here:
-  //  - If input <= about -708.4, the output always rounds to zero.
-  //  - If input >= about 709.8, the output always rounds to +infinity.
-  //  - If the input is NaN, the output is NaN.
-  //  - Otherwise, the result needs to be calculated.
-  Label result_is_finite_non_zero;
-  // Assert that we can load offset 0 (the small input threshold) and offset 1
-  // (the large input threshold) with a single ldp.
-  DCHECK(kDRegSize == (ExpConstant(constants, 1).offset() -
-                              ExpConstant(constants, 0).offset()));
-  __ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));
-
-  __ Fcmp(input, double_temp1);
-  __ Fccmp(input, double_temp2, NoFlag, hi);
-  // At this point, the condition flags can be in one of five states:
-  //  NZCV
-  //  1000      -708.4 < input < 709.8    result = exp(input)
-  //  0110      input == 709.8            result = +infinity
-  //  0010      input > 709.8             result = +infinity
-  //  0011      input is NaN              result = input
-  //  0000      input <= -708.4           result = +0.0
-
-  // Continue the common case first. 'mi' tests N == 1.
-  __ B(&result_is_finite_non_zero, mi);
-
-  // TODO(jbramley): Consider adding a +infinity register for ARM64.
-  __ Ldr(double_temp2, ExpConstant(constants, 2));    // Synthesize +infinity.
-
-  // Select between +0.0 and +infinity. 'lo' tests C == 0.
-  __ Fcsel(result, fp_zero, double_temp2, lo);
-  // Select between {+0.0 or +infinity} and input. 'vc' tests V == 0.
-  __ Fcsel(result, result, input, vc);
-  __ B(&done);
-
-  // The rest is magic, as described in InitializeMathExpData().
-  __ Bind(&result_is_finite_non_zero);
-
-  // Assert that we can load offset 3 and offset 4 with a single ldp.
-  DCHECK(kDRegSize == (ExpConstant(constants, 4).offset() -
-                              ExpConstant(constants, 3).offset()));
-  __ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
-  __ Fmadd(double_temp1, double_temp1, input, double_temp3);
-  __ Fmov(temp2.W(), double_temp1.S());
-  __ Fsub(double_temp1, double_temp1, double_temp3);
-
-  // Assert that we can load offset 5 and offset 6 with a single ldp.
-  DCHECK(kDRegSize == (ExpConstant(constants, 6).offset() -
-                              ExpConstant(constants, 5).offset()));
-  __ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
-  // TODO(jbramley): Consider using Fnmsub here.
-  __ Fmul(double_temp1, double_temp1, double_temp2);
-  __ Fsub(double_temp1, double_temp1, input);
-
-  __ Fmul(double_temp2, double_temp1, double_temp1);
-  __ Fsub(double_temp3, double_temp3, double_temp1);
-  __ Fmul(double_temp3, double_temp3, double_temp2);
-
-  __ Mov(temp1.W(), Operand(temp2.W(), LSR, 11));
-
-  __ Ldr(double_temp2, ExpConstant(constants, 7));
-  // TODO(jbramley): Consider using Fnmsub here.
-  __ Fmul(double_temp3, double_temp3, double_temp2);
-  __ Fsub(double_temp3, double_temp3, double_temp1);
-
-  // The 8th constant is 1.0, so use an immediate move rather than a load.
-  // We can't generate a runtime assertion here as we would need to call Abort
-  // in the runtime and we don't have an Isolate when we generate this code.
-  __ Fmov(double_temp2, 1.0);
-  __ Fadd(double_temp3, double_temp3, double_temp2);
-
-  __ And(temp2, temp2, 0x7ff);
-  __ Add(temp1, temp1, 0x3ff);
-
-  // Do the final table lookup.
-  __ Mov(temp3, ExternalReference::math_exp_log_table());
-
-  __ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeLog2));
-  __ Ldp(temp2.W(), temp3.W(), MemOperand(temp3));
-  __ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
-  __ Bfi(temp2, temp1, 32, 32);
-  __ Fmov(double_temp1, temp2);
-
-  __ Fmul(result, double_temp3, double_temp1);
-
-  __ Bind(&done);
-}
-
 #undef __
 
 }  // namespace internal
diff --git a/src/arm64/codegen-arm64.h b/src/arm64/codegen-arm64.h
index 573f6fe..b0490a8 100644
--- a/src/arm64/codegen-arm64.h
+++ b/src/arm64/codegen-arm64.h
@@ -27,22 +27,6 @@
   DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
 };
 
-
-class MathExpGenerator : public AllStatic {
- public:
-  static void EmitMathExp(MacroAssembler* masm,
-                          DoubleRegister input,
-                          DoubleRegister result,
-                          DoubleRegister double_scratch1,
-                          DoubleRegister double_scratch2,
-                          Register temp1,
-                          Register temp2,
-                          Register temp3);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/arm64/constants-arm64.h b/src/arm64/constants-arm64.h
index 00b24e9..65b8b30 100644
--- a/src/arm64/constants-arm64.h
+++ b/src/arm64/constants-arm64.h
@@ -117,89 +117,89 @@
 const unsigned kFloatMantissaBits = 23;
 const unsigned kFloatExponentBits = 8;
 
-#define INSTRUCTION_FIELDS_LIST(V_)                                            \
-/* Register fields */                                                          \
-V_(Rd, 4, 0, Bits)                        /* Destination register.     */      \
-V_(Rn, 9, 5, Bits)                        /* First source register.    */      \
-V_(Rm, 20, 16, Bits)                      /* Second source register.   */      \
-V_(Ra, 14, 10, Bits)                      /* Third source register.    */      \
-V_(Rt, 4, 0, Bits)                        /* Load dest / store source. */      \
-V_(Rt2, 14, 10, Bits)                     /* Load second dest /        */      \
-                                         /* store second source.      */       \
-V_(PrefetchMode, 4, 0, Bits)                                                   \
-                                                                               \
-/* Common bits */                                                              \
-V_(SixtyFourBits, 31, 31, Bits)                                                \
-V_(FlagsUpdate, 29, 29, Bits)                                                  \
-                                                                               \
-/* PC relative addressing */                                                   \
-V_(ImmPCRelHi, 23, 5, SignedBits)                                              \
-V_(ImmPCRelLo, 30, 29, Bits)                                                   \
-                                                                               \
-/* Add/subtract/logical shift register */                                      \
-V_(ShiftDP, 23, 22, Bits)                                                      \
-V_(ImmDPShift, 15, 10, Bits)                                                   \
-                                                                               \
-/* Add/subtract immediate */                                                   \
-V_(ImmAddSub, 21, 10, Bits)                                                    \
-V_(ShiftAddSub, 23, 22, Bits)                                                  \
-                                                                               \
-/* Add/substract extend */                                                     \
-V_(ImmExtendShift, 12, 10, Bits)                                               \
-V_(ExtendMode, 15, 13, Bits)                                                   \
-                                                                               \
-/* Move wide */                                                                \
-V_(ImmMoveWide, 20, 5, Bits)                                                   \
-V_(ShiftMoveWide, 22, 21, Bits)                                                \
-                                                                               \
-/* Logical immediate, bitfield and extract */                                  \
-V_(BitN, 22, 22, Bits)                                                         \
-V_(ImmRotate, 21, 16, Bits)                                                    \
-V_(ImmSetBits, 15, 10, Bits)                                                   \
-V_(ImmR, 21, 16, Bits)                                                         \
-V_(ImmS, 15, 10, Bits)                                                         \
-                                                                               \
-/* Test and branch immediate */                                                \
-V_(ImmTestBranch, 18, 5, SignedBits)                                           \
-V_(ImmTestBranchBit40, 23, 19, Bits)                                           \
-V_(ImmTestBranchBit5, 31, 31, Bits)                                            \
-                                                                               \
-/* Conditionals */                                                             \
-V_(Condition, 15, 12, Bits)                                                    \
-V_(ConditionBranch, 3, 0, Bits)                                                \
-V_(Nzcv, 3, 0, Bits)                                                           \
-V_(ImmCondCmp, 20, 16, Bits)                                                   \
-V_(ImmCondBranch, 23, 5, SignedBits)                                           \
-                                                                               \
-/* Floating point */                                                           \
-V_(FPType, 23, 22, Bits)                                                       \
-V_(ImmFP, 20, 13, Bits)                                                        \
-V_(FPScale, 15, 10, Bits)                                                      \
-                                                                               \
-/* Load Store */                                                               \
-V_(ImmLS, 20, 12, SignedBits)                                                  \
-V_(ImmLSUnsigned, 21, 10, Bits)                                                \
-V_(ImmLSPair, 21, 15, SignedBits)                                              \
-V_(SizeLS, 31, 30, Bits)                                                       \
-V_(ImmShiftLS, 12, 12, Bits)                                                   \
-                                                                               \
-/* Other immediates */                                                         \
-V_(ImmUncondBranch, 25, 0, SignedBits)                                         \
-V_(ImmCmpBranch, 23, 5, SignedBits)                                            \
-V_(ImmLLiteral, 23, 5, SignedBits)                                             \
-V_(ImmException, 20, 5, Bits)                                                  \
-V_(ImmHint, 11, 5, Bits)                                                       \
-V_(ImmBarrierDomain, 11, 10, Bits)                                             \
-V_(ImmBarrierType, 9, 8, Bits)                                                 \
-                                                                               \
-/* System (MRS, MSR) */                                                        \
-V_(ImmSystemRegister, 19, 5, Bits)                                             \
-V_(SysO0, 19, 19, Bits)                                                        \
-V_(SysOp1, 18, 16, Bits)                                                       \
-V_(SysOp2, 7, 5, Bits)                                                         \
-V_(CRn, 15, 12, Bits)                                                          \
-V_(CRm, 11, 8, Bits)                                                           \
-
+#define INSTRUCTION_FIELDS_LIST(V_)                     \
+  /* Register fields */                                 \
+  V_(Rd, 4, 0, Bits)    /* Destination register.     */ \
+  V_(Rn, 9, 5, Bits)    /* First source register.    */ \
+  V_(Rm, 20, 16, Bits)  /* Second source register.   */ \
+  V_(Ra, 14, 10, Bits)  /* Third source register.    */ \
+  V_(Rt, 4, 0, Bits)    /* Load dest / store source. */ \
+  V_(Rt2, 14, 10, Bits) /* Load second dest /        */ \
+                        /* store second source.      */ \
+  V_(Rs, 20, 16, Bits)  /* Store-exclusive status */    \
+  V_(PrefetchMode, 4, 0, Bits)                          \
+                                                        \
+  /* Common bits */                                     \
+  V_(SixtyFourBits, 31, 31, Bits)                       \
+  V_(FlagsUpdate, 29, 29, Bits)                         \
+                                                        \
+  /* PC relative addressing */                          \
+  V_(ImmPCRelHi, 23, 5, SignedBits)                     \
+  V_(ImmPCRelLo, 30, 29, Bits)                          \
+                                                        \
+  /* Add/subtract/logical shift register */             \
+  V_(ShiftDP, 23, 22, Bits)                             \
+  V_(ImmDPShift, 15, 10, Bits)                          \
+                                                        \
+  /* Add/subtract immediate */                          \
+  V_(ImmAddSub, 21, 10, Bits)                           \
+  V_(ShiftAddSub, 23, 22, Bits)                         \
+                                                        \
+  /* Add/substract extend */                            \
+  V_(ImmExtendShift, 12, 10, Bits)                      \
+  V_(ExtendMode, 15, 13, Bits)                          \
+                                                        \
+  /* Move wide */                                       \
+  V_(ImmMoveWide, 20, 5, Bits)                          \
+  V_(ShiftMoveWide, 22, 21, Bits)                       \
+                                                        \
+  /* Logical immediate, bitfield and extract */         \
+  V_(BitN, 22, 22, Bits)                                \
+  V_(ImmRotate, 21, 16, Bits)                           \
+  V_(ImmSetBits, 15, 10, Bits)                          \
+  V_(ImmR, 21, 16, Bits)                                \
+  V_(ImmS, 15, 10, Bits)                                \
+                                                        \
+  /* Test and branch immediate */                       \
+  V_(ImmTestBranch, 18, 5, SignedBits)                  \
+  V_(ImmTestBranchBit40, 23, 19, Bits)                  \
+  V_(ImmTestBranchBit5, 31, 31, Bits)                   \
+                                                        \
+  /* Conditionals */                                    \
+  V_(Condition, 15, 12, Bits)                           \
+  V_(ConditionBranch, 3, 0, Bits)                       \
+  V_(Nzcv, 3, 0, Bits)                                  \
+  V_(ImmCondCmp, 20, 16, Bits)                          \
+  V_(ImmCondBranch, 23, 5, SignedBits)                  \
+                                                        \
+  /* Floating point */                                  \
+  V_(FPType, 23, 22, Bits)                              \
+  V_(ImmFP, 20, 13, Bits)                               \
+  V_(FPScale, 15, 10, Bits)                             \
+                                                        \
+  /* Load Store */                                      \
+  V_(ImmLS, 20, 12, SignedBits)                         \
+  V_(ImmLSUnsigned, 21, 10, Bits)                       \
+  V_(ImmLSPair, 21, 15, SignedBits)                     \
+  V_(SizeLS, 31, 30, Bits)                              \
+  V_(ImmShiftLS, 12, 12, Bits)                          \
+                                                        \
+  /* Other immediates */                                \
+  V_(ImmUncondBranch, 25, 0, SignedBits)                \
+  V_(ImmCmpBranch, 23, 5, SignedBits)                   \
+  V_(ImmLLiteral, 23, 5, SignedBits)                    \
+  V_(ImmException, 20, 5, Bits)                         \
+  V_(ImmHint, 11, 5, Bits)                              \
+  V_(ImmBarrierDomain, 11, 10, Bits)                    \
+  V_(ImmBarrierType, 9, 8, Bits)                        \
+                                                        \
+  /* System (MRS, MSR) */                               \
+  V_(ImmSystemRegister, 19, 5, Bits)                    \
+  V_(SysO0, 19, 19, Bits)                               \
+  V_(SysOp1, 18, 16, Bits)                              \
+  V_(SysOp2, 7, 5, Bits)                                \
+  V_(CRn, 15, 12, Bits)                                 \
+  V_(CRm, 11, 8, Bits)
 
 #define SYSTEM_REGISTER_FIELDS_LIST(V_, M_)                                    \
 /* NZCV */                                                                     \
@@ -857,6 +857,29 @@
   #undef LOAD_STORE_REGISTER_OFFSET
 };
 
+// Load/store acquire/release
+enum LoadStoreAcquireReleaseOp {
+  LoadStoreAcquireReleaseFixed = 0x08000000,
+  LoadStoreAcquireReleaseFMask = 0x3F000000,
+  LoadStoreAcquireReleaseMask = 0xCFC08000,
+  STLXR_b = LoadStoreAcquireReleaseFixed | 0x00008000,
+  LDAXR_b = LoadStoreAcquireReleaseFixed | 0x00408000,
+  STLR_b  = LoadStoreAcquireReleaseFixed | 0x00808000,
+  LDAR_b  = LoadStoreAcquireReleaseFixed | 0x00C08000,
+  STLXR_h = LoadStoreAcquireReleaseFixed | 0x40008000,
+  LDAXR_h = LoadStoreAcquireReleaseFixed | 0x40408000,
+  STLR_h  = LoadStoreAcquireReleaseFixed | 0x40808000,
+  LDAR_h  = LoadStoreAcquireReleaseFixed | 0x40C08000,
+  STLXR_w = LoadStoreAcquireReleaseFixed | 0x80008000,
+  LDAXR_w = LoadStoreAcquireReleaseFixed | 0x80408000,
+  STLR_w  = LoadStoreAcquireReleaseFixed | 0x80808000,
+  LDAR_w  = LoadStoreAcquireReleaseFixed | 0x80C08000,
+  STLXR_x = LoadStoreAcquireReleaseFixed | 0xC0008000,
+  LDAXR_x = LoadStoreAcquireReleaseFixed | 0xC0408000,
+  STLR_x  = LoadStoreAcquireReleaseFixed | 0xC0808000,
+  LDAR_x  = LoadStoreAcquireReleaseFixed | 0xC0C08000,
+};
+
 // Conditional compare.
 enum ConditionalCompareOp {
   ConditionalCompareMask = 0x60000000,
diff --git a/src/arm64/decoder-arm64-inl.h b/src/arm64/decoder-arm64-inl.h
index e00105e..2405f87 100644
--- a/src/arm64/decoder-arm64-inl.h
+++ b/src/arm64/decoder-arm64-inl.h
@@ -217,8 +217,15 @@
     if (instr->Bit(28) == 0) {
       if (instr->Bit(29) == 0) {
         if (instr->Bit(26) == 0) {
-          // TODO(all): VisitLoadStoreExclusive.
-          V::VisitUnimplemented(instr);
+          if (instr->Mask(0xA08000) == 0x800000 ||
+              instr->Mask(0xA00000) == 0xA00000) {
+            V::VisitUnallocated(instr);
+          } else if (instr->Mask(0x808000) == 0) {
+            // Load/Store exclusive without acquire/release are unimplemented.
+            V::VisitUnimplemented(instr);
+          } else {
+            V::VisitLoadStoreAcquireRelease(instr);
+          }
         } else {
           DecodeAdvSIMDLoadStore(instr);
         }
diff --git a/src/arm64/decoder-arm64.h b/src/arm64/decoder-arm64.h
index b1ef41f..a17b324 100644
--- a/src/arm64/decoder-arm64.h
+++ b/src/arm64/decoder-arm64.h
@@ -16,49 +16,50 @@
 
 // List macro containing all visitors needed by the decoder class.
 
-#define VISITOR_LIST(V)             \
-  V(PCRelAddressing)                \
-  V(AddSubImmediate)                \
-  V(LogicalImmediate)               \
-  V(MoveWideImmediate)              \
-  V(Bitfield)                       \
-  V(Extract)                        \
-  V(UnconditionalBranch)            \
-  V(UnconditionalBranchToRegister)  \
-  V(CompareBranch)                  \
-  V(TestBranch)                     \
-  V(ConditionalBranch)              \
-  V(System)                         \
-  V(Exception)                      \
-  V(LoadStorePairPostIndex)         \
-  V(LoadStorePairOffset)            \
-  V(LoadStorePairPreIndex)          \
-  V(LoadLiteral)                    \
-  V(LoadStoreUnscaledOffset)        \
-  V(LoadStorePostIndex)             \
-  V(LoadStorePreIndex)              \
-  V(LoadStoreRegisterOffset)        \
-  V(LoadStoreUnsignedOffset)        \
-  V(LogicalShifted)                 \
-  V(AddSubShifted)                  \
-  V(AddSubExtended)                 \
-  V(AddSubWithCarry)                \
-  V(ConditionalCompareRegister)     \
-  V(ConditionalCompareImmediate)    \
-  V(ConditionalSelect)              \
-  V(DataProcessing1Source)          \
-  V(DataProcessing2Source)          \
-  V(DataProcessing3Source)          \
-  V(FPCompare)                      \
-  V(FPConditionalCompare)           \
-  V(FPConditionalSelect)            \
-  V(FPImmediate)                    \
-  V(FPDataProcessing1Source)        \
-  V(FPDataProcessing2Source)        \
-  V(FPDataProcessing3Source)        \
-  V(FPIntegerConvert)               \
-  V(FPFixedPointConvert)            \
-  V(Unallocated)                    \
+#define VISITOR_LIST(V)            \
+  V(PCRelAddressing)               \
+  V(AddSubImmediate)               \
+  V(LogicalImmediate)              \
+  V(MoveWideImmediate)             \
+  V(Bitfield)                      \
+  V(Extract)                       \
+  V(UnconditionalBranch)           \
+  V(UnconditionalBranchToRegister) \
+  V(CompareBranch)                 \
+  V(TestBranch)                    \
+  V(ConditionalBranch)             \
+  V(System)                        \
+  V(Exception)                     \
+  V(LoadStorePairPostIndex)        \
+  V(LoadStorePairOffset)           \
+  V(LoadStorePairPreIndex)         \
+  V(LoadLiteral)                   \
+  V(LoadStoreUnscaledOffset)       \
+  V(LoadStorePostIndex)            \
+  V(LoadStorePreIndex)             \
+  V(LoadStoreRegisterOffset)       \
+  V(LoadStoreUnsignedOffset)       \
+  V(LoadStoreAcquireRelease)       \
+  V(LogicalShifted)                \
+  V(AddSubShifted)                 \
+  V(AddSubExtended)                \
+  V(AddSubWithCarry)               \
+  V(ConditionalCompareRegister)    \
+  V(ConditionalCompareImmediate)   \
+  V(ConditionalSelect)             \
+  V(DataProcessing1Source)         \
+  V(DataProcessing2Source)         \
+  V(DataProcessing3Source)         \
+  V(FPCompare)                     \
+  V(FPConditionalCompare)          \
+  V(FPConditionalSelect)           \
+  V(FPImmediate)                   \
+  V(FPDataProcessing1Source)       \
+  V(FPDataProcessing2Source)       \
+  V(FPDataProcessing3Source)       \
+  V(FPIntegerConvert)              \
+  V(FPFixedPointConvert)           \
+  V(Unallocated)                   \
   V(Unimplemented)
 
 // The Visitor interface. Disassembler and simulator (and other tools)
diff --git a/src/arm64/deoptimizer-arm64.cc b/src/arm64/deoptimizer-arm64.cc
index fe2a269..c1d04ac 100644
--- a/src/arm64/deoptimizer-arm64.cc
+++ b/src/arm64/deoptimizer-arm64.cc
@@ -97,8 +97,7 @@
   // Save all allocatable floating point registers.
   CPURegList saved_fp_registers(
       CPURegister::kFPRegister, kDRegSizeInBits,
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
-          ->allocatable_double_codes_mask());
+      RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask());
   __ PushCPURegList(saved_fp_registers);
 
   // We save all the registers expcept jssp, sp and lr.
diff --git a/src/arm64/disasm-arm64.cc b/src/arm64/disasm-arm64.cc
index 00c3ec2..8e022b1 100644
--- a/src/arm64/disasm-arm64.cc
+++ b/src/arm64/disasm-arm64.cc
@@ -914,6 +914,34 @@
   Format(instr, mnemonic, form);
 }
 
+void DisassemblingDecoder::VisitLoadStoreAcquireRelease(Instruction *instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "'Wt, ['Xn]";
+  const char *form_x = "'Xt, ['Xn]";
+  const char *form_stlx = "'Ws, 'Wt, ['Xn]";
+  const char *form_stlx_x = "'Ws, 'Xt, ['Xn]";
+
+  switch (instr->Mask(LoadStoreAcquireReleaseMask)) {
+    case LDAXR_b: mnemonic = "ldaxrb"; break;
+    case STLR_b:  mnemonic = "stlrb"; break;
+    case LDAR_b:  mnemonic = "ldarb"; break;
+    case LDAXR_h: mnemonic = "ldaxrh"; break;
+    case STLR_h:  mnemonic = "stlrh"; break;
+    case LDAR_h:  mnemonic = "ldarh"; break;
+    case LDAXR_w: mnemonic = "ldaxr"; break;
+    case STLR_w:  mnemonic = "stlr"; break;
+    case LDAR_w:  mnemonic = "ldar"; break;
+    case LDAXR_x: mnemonic = "ldaxr"; form = form_x; break;
+    case STLR_x:  mnemonic = "stlr"; form = form_x; break;
+    case LDAR_x:  mnemonic = "ldar"; form = form_x; break;
+    case STLXR_h: mnemonic = "stlxrh"; form = form_stlx; break;
+    case STLXR_b: mnemonic = "stlxrb"; form = form_stlx; break;
+    case STLXR_w: mnemonic = "stlxr"; form = form_stlx; break;
+    case STLXR_x: mnemonic = "stlxr"; form = form_stlx_x; break;
+    default: form = "(LoadStoreAcquireReleaseMask)";
+  }
+  Format(instr, mnemonic, form);
+}
 
 void DisassemblingDecoder::VisitFPCompare(Instruction* instr) {
   const char *mnemonic = "unimplemented";
@@ -1295,6 +1323,9 @@
       }
       break;
     }
+    case 's':
+      reg_num = instr->Rs();
+      break;
     default: UNREACHABLE();
   }
 
@@ -1719,7 +1750,7 @@
 
 
 const char* NameConverter::NameOfAddress(byte* addr) const {
-  v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+  v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void *>(addr));
   return tmp_buffer_.start();
 }
 
@@ -1771,7 +1802,8 @@
   ~BufferDisassembler() { }
 
   virtual void ProcessOutput(v8::internal::Instruction* instr) {
-    v8::internal::SNPrintF(out_buffer_, "%s", GetOutput());
+    v8::internal::SNPrintF(out_buffer_, "%08" PRIx32 "       %s",
+                           instr->InstructionBits(), GetOutput());
   }
 
  private:
diff --git a/src/arm64/instrument-arm64.cc b/src/arm64/instrument-arm64.cc
index 7a8e2f4..dad89fe 100644
--- a/src/arm64/instrument-arm64.cc
+++ b/src/arm64/instrument-arm64.cc
@@ -429,6 +429,31 @@
   InstrumentLoadStore(instr);
 }
 
+void Instrument::VisitLoadStoreAcquireRelease(Instruction* instr) {
+  Update();
+  static Counter* load_counter = GetCounter("Load Acquire");
+  static Counter* store_counter = GetCounter("Store Release");
+
+  switch (instr->Mask(LoadStoreAcquireReleaseMask)) {
+    case LDAR_b:   // Fall-through.
+    case LDAR_h:   // Fall-through.
+    case LDAR_w:   // Fall-through.
+    case LDAR_x:   // Fall-through.
+    case LDAXR_b:  // Fall-through.
+    case LDAXR_h:  // Fall-through.
+    case LDAXR_w:  // Fall-through.
+    case LDAXR_x: load_counter->Increment(); break;
+    case STLR_b:   // Fall-through.
+    case STLR_h:   // Fall-through.
+    case STLR_w:   // Fall-through.
+    case STLR_x:   // Fall-through.
+    case STLXR_b:  // Fall-through.
+    case STLXR_h:  // Fall-through.
+    case STLXR_w:  // Fall-through.
+    case STLXR_x: store_counter->Increment(); break;
+    default: UNREACHABLE();
+  }
+}
 
 void Instrument::VisitLogicalShifted(Instruction* instr) {
   Update();
diff --git a/src/arm64/interface-descriptors-arm64.cc b/src/arm64/interface-descriptors-arm64.cc
index d23eb58..573d89e 100644
--- a/src/arm64/interface-descriptors-arm64.cc
+++ b/src/arm64/interface-descriptors-arm64.cc
@@ -13,6 +13,14 @@
 
 const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
 
+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
+    CallInterfaceDescriptorData* data, int register_parameter_count) {
+  const Register default_stub_registers[] = {x0, x1, x2, x3, x4};
+  CHECK_LE(static_cast<size_t>(register_parameter_count),
+           arraysize(default_stub_registers));
+  data->InitializePlatformSpecific(register_parameter_count,
+                                   default_stub_registers);
+}
 
 const Register LoadDescriptor::ReceiverRegister() { return x1; }
 const Register LoadDescriptor::NameRegister() { return x2; }
@@ -41,9 +49,6 @@
 const Register StoreTransitionDescriptor::MapRegister() { return x3; }
 
 
-const Register LoadGlobalViaContextDescriptor::SlotRegister() { return x2; }
-
-
 const Register StoreGlobalViaContextDescriptor::SlotRegister() { return x2; }
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return x0; }
 
@@ -63,8 +68,6 @@
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
 
-const Register HasPropertyDescriptor::ObjectRegister() { return x0; }
-const Register HasPropertyDescriptor::KeyRegister() { return x3; }
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -280,41 +283,24 @@
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
+  // register state
+  // x0: number of arguments
   // x1: function
   // x2: allocation site with elements kind
-  // x0: number of arguments to the constructor function
-  Register registers[] = {x1, x2};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
+  Register registers[] = {x1, x2, x0};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // stack param count needs (constructor pointer, and single argument)
   Register registers[] = {x1, x2, x0};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void InternalArrayConstructorConstantArgCountDescriptor::
-    InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
-  // x1: constructor function
-  // x0: number of arguments to the constructor function
-  Register registers[] = {x1};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // stack param count needs (constructor pointer, and single argument)
-  Register registers[] = {x1, x0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastArrayPushDescriptor::InitializePlatformSpecific(
+void VarArgFunctionDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // stack param count needs (arg count)
   Register registers[] = {x0};
diff --git a/src/arm64/macro-assembler-arm64-inl.h b/src/arm64/macro-assembler-arm64-inl.h
index 60418ad..f19d690 100644
--- a/src/arm64/macro-assembler-arm64-inl.h
+++ b/src/arm64/macro-assembler-arm64-inl.h
@@ -309,6 +309,22 @@
 LSPAIR_MACRO_LIST(DEFINE_FUNCTION)
 #undef DEFINE_FUNCTION
 
+#define DECLARE_FUNCTION(FN, OP)                                    \
+  void MacroAssembler::FN(const Register& rt, const Register& rn) { \
+    DCHECK(allow_macro_instructions_);                              \
+    OP(rt, rn);                                                     \
+  }
+LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+#define DECLARE_FUNCTION(FN, OP)                                  \
+  void MacroAssembler::FN(const Register& rs, const Register& rt, \
+                          const Register& rn) {                   \
+    DCHECK(allow_macro_instructions_);                            \
+    OP(rs, rt, rn);                                               \
+  }
+STLX_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
 
 void MacroAssembler::Asr(const Register& rd,
                          const Register& rn,
diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc
index 8a54e20..83b33b7 100644
--- a/src/arm64/macro-assembler-arm64.cc
+++ b/src/arm64/macro-assembler-arm64.cc
@@ -1971,9 +1971,6 @@
   Label start_call;
   Bind(&start_call);
 #endif
-  // Statement positions are expected to be recorded when the target
-  // address is loaded.
-  positions_recorder()->WriteRecordedPositions();
 
   // Addresses always have 64 bits, so we shouldn't encounter NONE32.
   DCHECK(rmode != RelocInfo::NONE32);
@@ -2496,11 +2493,12 @@
                                              const ParameterCount& expected,
                                              const ParameterCount& actual) {
   Label skip_flooding;
-  ExternalReference step_in_enabled =
-      ExternalReference::debug_step_in_enabled_address(isolate());
-  Mov(x4, Operand(step_in_enabled));
-  ldrb(x4, MemOperand(x4));
-  CompareAndBranch(x4, Operand(0), eq, &skip_flooding);
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  Mov(x4, Operand(last_step_action));
+  Ldrsb(x4, MemOperand(x4));
+  CompareAndBranch(x4, Operand(StepIn), lt, &skip_flooding);
   {
     FrameScope frame(this,
                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -2761,9 +2759,8 @@
 
 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
   Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  Ldr(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
-  Ldr(vector,
-      FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+  Ldr(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
+  Ldr(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
 }
 
 
@@ -4132,16 +4129,14 @@
   PushSafepointRegisters();
   PushCPURegList(CPURegList(
       CPURegister::kFPRegister, kDRegSizeInBits,
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
-          ->allocatable_double_codes_mask()));
+      RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask()));
 }
 
 
 void MacroAssembler::PopSafepointRegistersAndDoubles() {
   PopCPURegList(CPURegList(
       CPURegister::kFPRegister, kDRegSizeInBits,
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
-          ->allocatable_double_codes_mask()));
+      RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask()));
   PopSafepointRegisters();
 }
 
diff --git a/src/arm64/macro-assembler-arm64.h b/src/arm64/macro-assembler-arm64.h
index 67e64f4..246d574 100644
--- a/src/arm64/macro-assembler-arm64.h
+++ b/src/arm64/macro-assembler-arm64.h
@@ -68,6 +68,21 @@
   V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
   V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
 
+#define LDA_STL_MACRO_LIST(V) \
+  V(Ldarb, ldarb)             \
+  V(Ldarh, ldarh)             \
+  V(Ldar, ldar)               \
+  V(Ldaxrb, ldaxrb)           \
+  V(Ldaxrh, ldaxrh)           \
+  V(Ldaxr, ldaxr)             \
+  V(Stlrb, stlrb)             \
+  V(Stlrh, stlrh)             \
+  V(Stlr, stlr)
+
+#define STLX_MACRO_LIST(V) \
+  V(Stlxrb, stlxrb)        \
+  V(Stlxrh, stlxrh)        \
+  V(Stlxr, stlxr)
 
 // ----------------------------------------------------------------------------
 // Static helper functions
@@ -295,6 +310,17 @@
   void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
                           const MemOperand& addr, LoadStorePairOp op);
 
+// Load-acquire/store-release macros.
+#define DECLARE_FUNCTION(FN, OP) \
+  inline void FN(const Register& rt, const Register& rn);
+  LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+#define DECLARE_FUNCTION(FN, OP) \
+  inline void FN(const Register& rs, const Register& rt, const Register& rn);
+  STLX_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
   // V8-specific load/store helpers.
   void Load(const Register& rt, const MemOperand& addr, Representation r);
   void Store(const Register& rt, const MemOperand& addr, Representation r);
diff --git a/src/arm64/simulator-arm64.cc b/src/arm64/simulator-arm64.cc
index 81dbdf8..aa10eb2 100644
--- a/src/arm64/simulator-arm64.cc
+++ b/src/arm64/simulator-arm64.cc
@@ -524,7 +524,7 @@
 
 
 // static
-void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
   Redirection::DeleteChain(first);
 }
 
@@ -609,7 +609,8 @@
                xreg(4), xreg(5), xreg(6), xreg(7));
       ObjectPair result = target(xreg(0), xreg(1), xreg(2), xreg(3),
                                  xreg(4), xreg(5), xreg(6), xreg(7));
-      TraceSim("Returned: {%p, %p}\n", result.x, result.y);
+      TraceSim("Returned: {%p, %p}\n", static_cast<void*>(result.x),
+               static_cast<void*>(result.y));
 #ifdef DEBUG
       CorruptAllCallerSavedCPURegisters();
 #endif
@@ -639,7 +640,8 @@
       ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(xreg(8));
       ObjectTriple result = target(xreg(0), xreg(1), xreg(2), xreg(3), xreg(4),
                                    xreg(5), xreg(6), xreg(7));
-      TraceSim("Returned: {%p, %p, %p}\n", result.x, result.y, result.z);
+      TraceSim("Returned: {%p, %p, %p}\n", static_cast<void*>(result.x),
+               static_cast<void*>(result.y), static_cast<void*>(result.z));
 #ifdef DEBUG
       CorruptAllCallerSavedCPURegisters();
 #endif
@@ -1900,6 +1902,9 @@
   }
 }
 
+void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
+  // TODO(binji)
+}
 
 void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) {
   if ((address >= stack_limit_) && (address < stack)) {
diff --git a/src/arm64/simulator-arm64.h b/src/arm64/simulator-arm64.h
index 586f204..cc2dcc2 100644
--- a/src/arm64/simulator-arm64.h
+++ b/src/arm64/simulator-arm64.h
@@ -151,8 +151,7 @@
 
 class Simulator : public DecoderVisitor {
  public:
-  static void FlushICache(v8::internal::HashMap* i_cache, void* start,
-                          size_t size) {
+  static void FlushICache(base::HashMap* i_cache, void* start, size_t size) {
     USE(i_cache);
     USE(start);
     USE(size);
@@ -168,7 +167,7 @@
 
   static void Initialize(Isolate* isolate);
 
-  static void TearDown(HashMap* i_cache, Redirection* first);
+  static void TearDown(base::HashMap* i_cache, Redirection* first);
 
   static Simulator* current(v8::internal::Isolate* isolate);
 
diff --git a/src/assembler.cc b/src/assembler.cc
index 17cd56b..c7e819a 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -39,6 +39,7 @@
 #include "src/api.h"
 #include "src/base/cpu.h"
 #include "src/base/functional.h"
+#include "src/base/ieee754.h"
 #include "src/base/lazy-instance.h"
 #include "src/base/platform/platform.h"
 #include "src/base/utils/random-number-generator.h"
@@ -53,7 +54,6 @@
 #include "src/ic/stub-cache.h"
 #include "src/interpreter/interpreter.h"
 #include "src/ostreams.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/regexp/jsregexp.h"
 #include "src/regexp/regexp-macro-assembler.h"
 #include "src/regexp/regexp-stack.h"
@@ -114,39 +114,6 @@
 namespace internal {
 
 // -----------------------------------------------------------------------------
-// Common register code.
-
-const char* Register::ToString() {
-  // This is the mapping of allocation indices to registers.
-  DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
-  return RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
-      ->GetGeneralRegisterName(reg_code);
-}
-
-
-bool Register::IsAllocatable() const {
-  return ((1 << reg_code) &
-          RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
-              ->allocatable_general_codes_mask()) != 0;
-}
-
-
-const char* DoubleRegister::ToString() {
-  // This is the mapping of allocation indices to registers.
-  DCHECK(reg_code >= 0 && reg_code < kMaxNumRegisters);
-  return RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
-      ->GetDoubleRegisterName(reg_code);
-}
-
-
-bool DoubleRegister::IsAllocatable() const {
-  return ((1 << reg_code) &
-          RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
-              ->allocatable_double_codes_mask()) != 0;
-}
-
-
-// -----------------------------------------------------------------------------
 // Common double constants.
 
 struct DoubleConstant BASE_EMBEDDED {
@@ -162,11 +129,6 @@
 
 const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
 
-static bool math_exp_data_initialized = false;
-static base::Mutex* math_exp_data_mutex = NULL;
-static double* math_exp_constants_array = NULL;
-static double* math_exp_log_table_array = NULL;
-
 // -----------------------------------------------------------------------------
 // Implementation of AssemblerBase
 
@@ -361,6 +323,49 @@
 const int kStatementPositionTag = 2;
 const int kDeoptReasonTag = 3;
 
+void RelocInfo::update_wasm_memory_reference(
+    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
+    ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
+  if (IsWasmMemoryReference(rmode_)) {
+    Address updated_reference;
+    DCHECK(old_size == 0 || Memory::IsAddressInRange(
+                                old_base, wasm_memory_reference(), old_size));
+    updated_reference = new_base + (wasm_memory_reference() - old_base);
+    DCHECK(new_size == 0 ||
+           Memory::IsAddressInRange(new_base, updated_reference, new_size));
+    unchecked_update_wasm_memory_reference(updated_reference,
+                                           icache_flush_mode);
+  } else if (IsWasmMemorySizeReference(rmode_)) {
+    uint32_t updated_size_reference;
+    DCHECK(old_size == 0 || wasm_memory_size_reference() <= old_size);
+    updated_size_reference =
+        new_size + (wasm_memory_size_reference() - old_size);
+    DCHECK(updated_size_reference <= new_size);
+    unchecked_update_wasm_memory_size(updated_size_reference,
+                                      icache_flush_mode);
+  } else {
+    UNREACHABLE();
+  }
+  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+    Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
+  }
+}
+
+void RelocInfo::update_wasm_global_reference(
+    Address old_base, Address new_base, ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmGlobalReference(rmode_));
+  Address updated_reference;
+  DCHECK(reinterpret_cast<uintptr_t>(old_base) <=
+         reinterpret_cast<uintptr_t>(wasm_global_reference()));
+  updated_reference = new_base + (wasm_global_reference() - old_base);
+  DCHECK(reinterpret_cast<uintptr_t>(new_base) <=
+         reinterpret_cast<uintptr_t>(updated_reference));
+  unchecked_update_wasm_memory_reference(updated_reference, icache_flush_mode);
+  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+    Assembler::FlushICache(isolate_, pc_, sizeof(int32_t));
+  }
+}
 
 uint32_t RelocInfoWriter::WriteLongPCJump(uint32_t pc_delta) {
   // Return if the pc_delta can fit in kSmallPCDeltaBits bits.
@@ -851,6 +856,8 @@
       return "wasm memory reference";
     case WASM_MEMORY_SIZE_REFERENCE:
       return "wasm memory size reference";
+    case WASM_GLOBAL_REFERENCE:
+      return "wasm global value reference";
     case NUMBER_OF_MODES:
     case PC_JUMP:
       UNREACHABLE();
@@ -948,6 +955,7 @@
     case GENERATOR_CONTINUATION:
     case WASM_MEMORY_REFERENCE:
     case WASM_MEMORY_SIZE_REFERENCE:
+    case WASM_GLOBAL_REFERENCE:
     case NONE32:
     case NONE64:
       break;
@@ -987,61 +995,6 @@
   double_constants.negative_infinity = -V8_INFINITY;
   double_constants.uint32_bias =
     static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
-
-  math_exp_data_mutex = new base::Mutex();
-}
-
-
-void ExternalReference::InitializeMathExpData() {
-  // Early return?
-  if (math_exp_data_initialized) return;
-
-  base::LockGuard<base::Mutex> lock_guard(math_exp_data_mutex);
-  if (!math_exp_data_initialized) {
-    // If this is changed, generated code must be adapted too.
-    const int kTableSizeBits = 11;
-    const int kTableSize = 1 << kTableSizeBits;
-    const double kTableSizeDouble = static_cast<double>(kTableSize);
-
-    math_exp_constants_array = new double[9];
-    // Input values smaller than this always return 0.
-    math_exp_constants_array[0] = -708.39641853226408;
-    // Input values larger than this always return +Infinity.
-    math_exp_constants_array[1] = 709.78271289338397;
-    math_exp_constants_array[2] = V8_INFINITY;
-    // The rest is black magic. Do not attempt to understand it. It is
-    // loosely based on the "expd" function published at:
-    // http://herumi.blogspot.com/2011/08/fast-double-precision-exponential.html
-    const double constant3 = (1 << kTableSizeBits) / std::log(2.0);
-    math_exp_constants_array[3] = constant3;
-    math_exp_constants_array[4] =
-        static_cast<double>(static_cast<int64_t>(3) << 51);
-    math_exp_constants_array[5] = 1 / constant3;
-    math_exp_constants_array[6] = 3.0000000027955394;
-    math_exp_constants_array[7] = 0.16666666685227835;
-    math_exp_constants_array[8] = 1;
-
-    math_exp_log_table_array = new double[kTableSize];
-    for (int i = 0; i < kTableSize; i++) {
-      double value = std::pow(2, i / kTableSizeDouble);
-      uint64_t bits = bit_cast<uint64_t, double>(value);
-      bits &= (static_cast<uint64_t>(1) << 52) - 1;
-      double mantissa = bit_cast<double, uint64_t>(bits);
-      math_exp_log_table_array[i] = mantissa;
-    }
-
-    math_exp_data_initialized = true;
-  }
-}
-
-
-void ExternalReference::TearDownMathExpData() {
-  delete[] math_exp_constants_array;
-  math_exp_constants_array = NULL;
-  delete[] math_exp_log_table_array;
-  math_exp_log_table_array = NULL;
-  delete math_exp_data_mutex;
-  math_exp_data_mutex = NULL;
 }
 
 
@@ -1288,64 +1241,27 @@
       Redirect(isolate, FUNCTION_ADDR(wasm::word64_popcnt_wrapper)));
 }
 
-static void f64_acos_wrapper(double* param) { *param = std::acos(*param); }
+static void f64_acos_wrapper(double* param) {
+  WriteDoubleValue(param, std::acos(ReadDoubleValue(param)));
+}
 
 ExternalReference ExternalReference::f64_acos_wrapper_function(
     Isolate* isolate) {
   return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_acos_wrapper)));
 }
 
-static void f64_asin_wrapper(double* param) { *param = std::asin(*param); }
+static void f64_asin_wrapper(double* param) {
+  WriteDoubleValue(param, std::asin(ReadDoubleValue(param)));
+}
 
 ExternalReference ExternalReference::f64_asin_wrapper_function(
     Isolate* isolate) {
   return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_asin_wrapper)));
 }
 
-static void f64_atan_wrapper(double* param) { *param = std::atan(*param); }
-
-ExternalReference ExternalReference::f64_atan_wrapper_function(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_atan_wrapper)));
-}
-
-static void f64_cos_wrapper(double* param) { *param = std::cos(*param); }
-
-ExternalReference ExternalReference::f64_cos_wrapper_function(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_cos_wrapper)));
-}
-
-static void f64_sin_wrapper(double* param) { *param = std::sin(*param); }
-
-ExternalReference ExternalReference::f64_sin_wrapper_function(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_sin_wrapper)));
-}
-
-static void f64_tan_wrapper(double* param) { *param = std::tan(*param); }
-
-ExternalReference ExternalReference::f64_tan_wrapper_function(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_tan_wrapper)));
-}
-
-static void f64_exp_wrapper(double* param) { *param = std::exp(*param); }
-
-ExternalReference ExternalReference::f64_exp_wrapper_function(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_exp_wrapper)));
-}
-
-static void f64_log_wrapper(double* param) { *param = std::log(*param); }
-
-ExternalReference ExternalReference::f64_log_wrapper_function(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_log_wrapper)));
-}
-
 static void f64_pow_wrapper(double* param0, double* param1) {
-  *param0 = power_double_double(*param0, *param1);
+  WriteDoubleValue(param0, power_double_double(ReadDoubleValue(param0),
+                                               ReadDoubleValue(param1)));
 }
 
 ExternalReference ExternalReference::f64_pow_wrapper_function(
@@ -1353,32 +1269,9 @@
   return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_pow_wrapper)));
 }
 
-static void f64_atan2_wrapper(double* param0, double* param1) {
-  double x = *param0;
-  double y = *param1;
-  // TODO(bradnelson): Find a good place to put this to share
-  // with the same code in src/runtime/runtime-math.cc
-  static const double kPiDividedBy4 = 0.78539816339744830962;
-  if (std::isinf(x) && std::isinf(y)) {
-    // Make sure that the result in case of two infinite arguments
-    // is a multiple of Pi / 4. The sign of the result is determined
-    // by the first argument (x) and the sign of the second argument
-    // determines the multiplier: one or three.
-    int multiplier = (x < 0) ? -1 : 1;
-    if (y < 0) multiplier *= 3;
-    *param0 = multiplier * kPiDividedBy4;
-  } else {
-    *param0 = std::atan2(x, y);
-  }
-}
-
-ExternalReference ExternalReference::f64_atan2_wrapper_function(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_atan2_wrapper)));
-}
-
 static void f64_mod_wrapper(double* param0, double* param1) {
-  *param0 = modulo(*param0, *param1);
+  WriteDoubleValue(param0,
+                   modulo(ReadDoubleValue(param0), ReadDoubleValue(param1)));
 }
 
 ExternalReference ExternalReference::f64_mod_wrapper_function(
@@ -1533,7 +1426,7 @@
 
 
 ExternalReference ExternalReference::is_profiling_address(Isolate* isolate) {
-  return ExternalReference(isolate->cpu_profiler()->is_profiling_address());
+  return ExternalReference(isolate->is_profiling_address());
 }
 
 
@@ -1623,28 +1516,70 @@
 
 #endif  // V8_INTERPRETED_REGEXP
 
-
-ExternalReference ExternalReference::math_log_double_function(
-    Isolate* isolate) {
-  typedef double (*d2d)(double x);
-  return ExternalReference(Redirect(isolate,
-                                    FUNCTION_ADDR(static_cast<d2d>(std::log)),
-                                    BUILTIN_FP_CALL));
-}
-
-
-ExternalReference ExternalReference::math_exp_constants(int constant_index) {
-  DCHECK(math_exp_data_initialized);
+ExternalReference ExternalReference::ieee754_atan_function(Isolate* isolate) {
   return ExternalReference(
-      reinterpret_cast<void*>(math_exp_constants_array + constant_index));
+      Redirect(isolate, FUNCTION_ADDR(base::ieee754::atan), BUILTIN_FP_CALL));
 }
 
-
-ExternalReference ExternalReference::math_exp_log_table() {
-  DCHECK(math_exp_data_initialized);
-  return ExternalReference(reinterpret_cast<void*>(math_exp_log_table_array));
+ExternalReference ExternalReference::ieee754_atan2_function(Isolate* isolate) {
+  return ExternalReference(Redirect(
+      isolate, FUNCTION_ADDR(base::ieee754::atan2), BUILTIN_FP_FP_CALL));
 }
 
+ExternalReference ExternalReference::ieee754_atanh_function(Isolate* isolate) {
+  return ExternalReference(Redirect(
+      isolate, FUNCTION_ADDR(base::ieee754::atanh), BUILTIN_FP_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_cbrt_function(Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(base::ieee754::cbrt),
+                                    BUILTIN_FP_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_cos_function(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(base::ieee754::cos), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_exp_function(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(base::ieee754::exp), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_expm1_function(Isolate* isolate) {
+  return ExternalReference(Redirect(
+      isolate, FUNCTION_ADDR(base::ieee754::expm1), BUILTIN_FP_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_log_function(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(base::ieee754::log), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_log1p_function(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(base::ieee754::log1p), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_log10_function(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(base::ieee754::log10), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_log2_function(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(base::ieee754::log2), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_sin_function(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(base::ieee754::sin), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_tan_function(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(base::ieee754::tan), BUILTIN_FP_CALL));
+}
 
 ExternalReference ExternalReference::page_flags(Page* page) {
   return ExternalReference(reinterpret_cast<Address>(page) +
@@ -1768,12 +1703,15 @@
                                     BUILTIN_FP_FP_CALL));
 }
 
-
-ExternalReference ExternalReference::debug_step_in_enabled_address(
+ExternalReference ExternalReference::debug_last_step_action_address(
     Isolate* isolate) {
-  return ExternalReference(isolate->debug()->step_in_enabled_address());
+  return ExternalReference(isolate->debug()->last_step_action_address());
 }
 
+ExternalReference ExternalReference::debug_suspended_generator_address(
+    Isolate* isolate) {
+  return ExternalReference(isolate->debug()->suspended_generator_address());
+}
 
 ExternalReference ExternalReference::fixed_typed_array_base_data_offset() {
   return ExternalReference(reinterpret_cast<void*>(
@@ -1806,49 +1744,44 @@
 void AssemblerPositionsRecorder::RecordPosition(int pos) {
   DCHECK(pos != RelocInfo::kNoPosition);
   DCHECK(pos >= 0);
-  state_.current_position = pos;
+  current_position_ = pos;
   LOG_CODE_EVENT(assembler_->isolate(),
                  CodeLinePosInfoAddPositionEvent(jit_handler_data_,
                                                  assembler_->pc_offset(),
                                                  pos));
+  WriteRecordedPositions();
 }
 
 void AssemblerPositionsRecorder::RecordStatementPosition(int pos) {
   DCHECK(pos != RelocInfo::kNoPosition);
   DCHECK(pos >= 0);
-  state_.current_statement_position = pos;
+  current_statement_position_ = pos;
   LOG_CODE_EVENT(assembler_->isolate(),
                  CodeLinePosInfoAddStatementPositionEvent(
                      jit_handler_data_,
                      assembler_->pc_offset(),
                      pos));
+  RecordPosition(pos);
 }
 
-bool AssemblerPositionsRecorder::WriteRecordedPositions() {
-  bool written = false;
-
+void AssemblerPositionsRecorder::WriteRecordedPositions() {
   // Write the statement position if it is different from what was written last
   // time.
-  if (state_.current_statement_position != state_.written_statement_position) {
+  if (current_statement_position_ != written_statement_position_) {
     EnsureSpace ensure_space(assembler_);
     assembler_->RecordRelocInfo(RelocInfo::STATEMENT_POSITION,
-                                state_.current_statement_position);
-    state_.written_position = state_.current_statement_position;
-    state_.written_statement_position = state_.current_statement_position;
-    written = true;
+                                current_statement_position_);
+    written_position_ = current_statement_position_;
+    written_statement_position_ = current_statement_position_;
   }
 
   // Write the position if it is different from what was written last time and
   // also different from the statement position that was just written.
-  if (state_.current_position != state_.written_position) {
+  if (current_position_ != written_position_) {
     EnsureSpace ensure_space(assembler_);
-    assembler_->RecordRelocInfo(RelocInfo::POSITION, state_.current_position);
-    state_.written_position = state_.current_position;
-    written = true;
+    assembler_->RecordRelocInfo(RelocInfo::POSITION, current_position_);
+    written_position_ = current_position_;
   }
-
-  // Return whether something was written.
-  return written;
 }
 
 
@@ -2057,7 +1990,7 @@
 // Platform specific but identical code for all the platforms.
 
 void Assembler::RecordDeoptReason(const int reason, int raw_position, int id) {
-  if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling()) {
+  if (FLAG_trace_deopt || isolate()->is_profiling()) {
     EnsureSpace ensure_space(this);
     RecordRelocInfo(RelocInfo::POSITION, raw_position);
     RecordRelocInfo(RelocInfo::DEOPT_REASON, reason);
diff --git a/src/assembler.h b/src/assembler.h
index 353abdb..0c2b7e8 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -39,6 +39,7 @@
 #include "src/builtins.h"
 #include "src/isolate.h"
 #include "src/log.h"
+#include "src/register-configuration.h"
 #include "src/runtime/runtime.h"
 
 namespace v8 {
@@ -386,6 +387,7 @@
     EMBEDDED_OBJECT,
     // To relocate pointers into the wasm memory embedded in wasm code
     WASM_MEMORY_REFERENCE,
+    WASM_GLOBAL_REFERENCE,
     WASM_MEMORY_SIZE_REFERENCE,
     CELL,
 
@@ -448,8 +450,7 @@
   }
 
   static inline bool IsRealRelocMode(Mode mode) {
-    return mode >= FIRST_REAL_RELOC_MODE &&
-        mode <= LAST_REAL_RELOC_MODE;
+    return mode >= FIRST_REAL_RELOC_MODE && mode <= LAST_REAL_RELOC_MODE;
   }
   static inline bool IsCodeTarget(Mode mode) {
     return mode <= LAST_CODE_ENUM;
@@ -529,6 +530,9 @@
   static inline bool IsWasmMemorySizeReference(Mode mode) {
     return mode == WASM_MEMORY_SIZE_REFERENCE;
   }
+  static inline bool IsWasmGlobalReference(Mode mode) {
+    return mode == WASM_GLOBAL_REFERENCE;
+  }
   static inline int ModeMask(Mode mode) { return 1 << mode; }
 
   // Accessors
@@ -556,44 +560,42 @@
   bool IsInConstantPool();
 
   Address wasm_memory_reference();
+  Address wasm_global_reference();
   uint32_t wasm_memory_size_reference();
   void update_wasm_memory_reference(
       Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
       ICacheFlushMode icache_flush_mode = SKIP_ICACHE_FLUSH);
+  void update_wasm_global_reference(
+      Address old_base, Address new_base,
+      ICacheFlushMode icache_flush_mode = SKIP_ICACHE_FLUSH);
 
   // this relocation applies to;
   // can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
   INLINE(Address target_address());
-  INLINE(void set_target_address(Address target,
-                                 WriteBarrierMode write_barrier_mode =
-                                     UPDATE_WRITE_BARRIER,
-                                 ICacheFlushMode icache_flush_mode =
-                                     FLUSH_ICACHE_IF_NEEDED));
+  INLINE(void set_target_address(
+      Address target,
+      WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
   INLINE(Object* target_object());
   INLINE(Handle<Object> target_object_handle(Assembler* origin));
-  INLINE(void set_target_object(Object* target,
-                                WriteBarrierMode write_barrier_mode =
-                                    UPDATE_WRITE_BARRIER,
-                                ICacheFlushMode icache_flush_mode =
-                                    FLUSH_ICACHE_IF_NEEDED));
+  INLINE(void set_target_object(
+      Object* target,
+      WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
   INLINE(Address target_runtime_entry(Assembler* origin));
-  INLINE(void set_target_runtime_entry(Address target,
-                                       WriteBarrierMode write_barrier_mode =
-                                           UPDATE_WRITE_BARRIER,
-                                       ICacheFlushMode icache_flush_mode =
-                                           FLUSH_ICACHE_IF_NEEDED));
+  INLINE(void set_target_runtime_entry(
+      Address target,
+      WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
   INLINE(Cell* target_cell());
   INLINE(Handle<Cell> target_cell_handle());
-  INLINE(void set_target_cell(Cell* cell,
-                              WriteBarrierMode write_barrier_mode =
-                                  UPDATE_WRITE_BARRIER,
-                              ICacheFlushMode icache_flush_mode =
-                                  FLUSH_ICACHE_IF_NEEDED));
+  INLINE(void set_target_cell(
+      Cell* cell, WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
   INLINE(Handle<Object> code_age_stub_handle(Assembler* origin));
   INLINE(Code* code_age_stub());
-  INLINE(void set_code_age_stub(Code* stub,
-                                ICacheFlushMode icache_flush_mode =
-                                    FLUSH_ICACHE_IF_NEEDED));
+  INLINE(void set_code_age_stub(
+      Code* stub, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
 
   // Returns the address of the constant pool entry where the target address
   // is held.  This should only be called if IsInConstantPool returns true.
@@ -674,6 +676,11 @@
   static const int kApplyMask;  // Modes affected by apply.  Depends on arch.
 
  private:
+  void unchecked_update_wasm_memory_reference(Address address,
+                                              ICacheFlushMode flush_mode);
+  void unchecked_update_wasm_memory_size(uint32_t size,
+                                         ICacheFlushMode flush_mode);
+
   Isolate* isolate_;
   // On ARM, note that pc_ is the address of the constant pool entry
   // to be relocated and not the address of the instruction
@@ -889,8 +896,6 @@
   };
 
   static void SetUp();
-  static void InitializeMathExpData();
-  static void TearDownMathExpData();
 
   typedef void* ExternalReferenceRedirector(Isolate* isolate, void* original,
                                             Type type);
@@ -968,13 +973,6 @@
 
   static ExternalReference f64_acos_wrapper_function(Isolate* isolate);
   static ExternalReference f64_asin_wrapper_function(Isolate* isolate);
-  static ExternalReference f64_atan_wrapper_function(Isolate* isolate);
-  static ExternalReference f64_cos_wrapper_function(Isolate* isolate);
-  static ExternalReference f64_sin_wrapper_function(Isolate* isolate);
-  static ExternalReference f64_tan_wrapper_function(Isolate* isolate);
-  static ExternalReference f64_exp_wrapper_function(Isolate* isolate);
-  static ExternalReference f64_log_wrapper_function(Isolate* isolate);
-  static ExternalReference f64_atan2_wrapper_function(Isolate* isolate);
   static ExternalReference f64_pow_wrapper_function(Isolate* isolate);
   static ExternalReference f64_mod_wrapper_function(Isolate* isolate);
 
@@ -1036,10 +1034,20 @@
   static ExternalReference address_of_the_hole_nan();
   static ExternalReference address_of_uint32_bias();
 
-  static ExternalReference math_log_double_function(Isolate* isolate);
-
-  static ExternalReference math_exp_constants(int constant_index);
-  static ExternalReference math_exp_log_table();
+  // IEEE 754 functions.
+  static ExternalReference ieee754_atan_function(Isolate* isolate);
+  static ExternalReference ieee754_atan2_function(Isolate* isolate);
+  static ExternalReference ieee754_atanh_function(Isolate* isolate);
+  static ExternalReference ieee754_cbrt_function(Isolate* isolate);
+  static ExternalReference ieee754_cos_function(Isolate* isolate);
+  static ExternalReference ieee754_exp_function(Isolate* isolate);
+  static ExternalReference ieee754_expm1_function(Isolate* isolate);
+  static ExternalReference ieee754_log_function(Isolate* isolate);
+  static ExternalReference ieee754_log1p_function(Isolate* isolate);
+  static ExternalReference ieee754_log10_function(Isolate* isolate);
+  static ExternalReference ieee754_log2_function(Isolate* isolate);
+  static ExternalReference ieee754_sin_function(Isolate* isolate);
+  static ExternalReference ieee754_tan_function(Isolate* isolate);
 
   static ExternalReference page_flags(Page* page);
 
@@ -1064,8 +1072,11 @@
 
   Address address() const { return reinterpret_cast<Address>(address_); }
 
-  // Used to check if single stepping is enabled in generated code.
-  static ExternalReference debug_step_in_enabled_address(Isolate* isolate);
+  // Used to read out the last step action of the debugger.
+  static ExternalReference debug_last_step_action_address(Isolate* isolate);
+
+  // Used to check for suspended generator, used for stepping across await call.
+  static ExternalReference debug_suspended_generator_address(Isolate* isolate);
 
 #ifndef V8_INTERPRETED_REGEXP
   // C functions called from RegExp generated code.
@@ -1128,23 +1139,14 @@
 // -----------------------------------------------------------------------------
 // Position recording support
 
-struct PositionState {
-  PositionState() : current_position(RelocInfo::kNoPosition),
-                    written_position(RelocInfo::kNoPosition),
-                    current_statement_position(RelocInfo::kNoPosition),
-                    written_statement_position(RelocInfo::kNoPosition) {}
-
-  int current_position;
-  int written_position;
-
-  int current_statement_position;
-  int written_statement_position;
-};
-
 class AssemblerPositionsRecorder : public PositionsRecorder {
  public:
   explicit AssemblerPositionsRecorder(Assembler* assembler)
-      : assembler_(assembler) {}
+      : assembler_(assembler),
+        current_position_(RelocInfo::kNoPosition),
+        written_position_(RelocInfo::kNoPosition),
+        current_statement_position_(RelocInfo::kNoPosition),
+        written_statement_position_(RelocInfo::kNoPosition) {}
 
   // Set current position to pos.
   void RecordPosition(int pos);
@@ -1152,18 +1154,17 @@
   // Set current statement position to pos.
   void RecordStatementPosition(int pos);
 
-  // Write recorded positions to relocation information.
-  bool WriteRecordedPositions();
-
-  int current_position() const { return state_.current_position; }
-
-  int current_statement_position() const {
-    return state_.current_statement_position;
-  }
-
  private:
+  // Write recorded positions to relocation information.
+  void WriteRecordedPositions();
+
   Assembler* assembler_;
-  PositionState state_;
+
+  int current_position_;
+  int written_position_;
+
+  int current_statement_position_;
+  int written_statement_position_;
 
   DISALLOW_COPY_AND_ASSIGN(AssemblerPositionsRecorder);
 };
diff --git a/src/ast/ast-expression-visitor.cc b/src/ast/ast-expression-visitor.cc
index 91d4afb..7536d90 100644
--- a/src/ast/ast-expression-visitor.cc
+++ b/src/ast/ast-expression-visitor.cc
@@ -13,395 +13,153 @@
 namespace v8 {
 namespace internal {
 
-
-#define RECURSE(call)               \
-  do {                              \
-    DCHECK(!HasStackOverflow());    \
-    call;                           \
-    if (HasStackOverflow()) return; \
-  } while (false)
-
-
-#define RECURSE_EXPRESSION(call)    \
-  do {                              \
-    DCHECK(!HasStackOverflow());    \
-    ++depth_;                       \
-    call;                           \
-    --depth_;                       \
-    if (HasStackOverflow()) return; \
-  } while (false)
-
-
 AstExpressionVisitor::AstExpressionVisitor(Isolate* isolate, Expression* root)
-    : root_(root), depth_(0) {
-  InitializeAstVisitor(isolate);
-}
-
+    : AstTraversalVisitor(isolate), root_(root) {}
 
 AstExpressionVisitor::AstExpressionVisitor(uintptr_t stack_limit,
                                            Expression* root)
-    : root_(root), depth_(0) {
-  InitializeAstVisitor(stack_limit);
-}
+    : AstTraversalVisitor(stack_limit), root_(root) {}
 
-
-void AstExpressionVisitor::Run() { RECURSE(Visit(root_)); }
-
-
-void AstExpressionVisitor::VisitVariableDeclaration(VariableDeclaration* decl) {
-}
-
-
-void AstExpressionVisitor::VisitFunctionDeclaration(FunctionDeclaration* decl) {
-  RECURSE(Visit(decl->fun()));
-}
-
-
-void AstExpressionVisitor::VisitImportDeclaration(ImportDeclaration* decl) {}
-
-
-void AstExpressionVisitor::VisitExportDeclaration(ExportDeclaration* decl) {}
-
-
-void AstExpressionVisitor::VisitStatements(ZoneList<Statement*>* stmts) {
-  for (int i = 0; i < stmts->length(); ++i) {
-    Statement* stmt = stmts->at(i);
-    RECURSE(Visit(stmt));
-    if (stmt->IsJump()) break;
-  }
-}
-
-
-void AstExpressionVisitor::VisitBlock(Block* stmt) {
-  RECURSE(VisitStatements(stmt->statements()));
-}
-
-
-void AstExpressionVisitor::VisitExpressionStatement(ExpressionStatement* stmt) {
-  RECURSE(Visit(stmt->expression()));
-}
-
-
-void AstExpressionVisitor::VisitEmptyStatement(EmptyStatement* stmt) {}
-
-
-void AstExpressionVisitor::VisitSloppyBlockFunctionStatement(
-    SloppyBlockFunctionStatement* stmt) {
-  RECURSE(Visit(stmt->statement()));
-}
-
-
-void AstExpressionVisitor::VisitIfStatement(IfStatement* stmt) {
-  RECURSE(Visit(stmt->condition()));
-  RECURSE(Visit(stmt->then_statement()));
-  RECURSE(Visit(stmt->else_statement()));
-}
-
-
-void AstExpressionVisitor::VisitContinueStatement(ContinueStatement* stmt) {}
-
-
-void AstExpressionVisitor::VisitBreakStatement(BreakStatement* stmt) {}
-
-
-void AstExpressionVisitor::VisitReturnStatement(ReturnStatement* stmt) {
-  RECURSE(Visit(stmt->expression()));
-}
-
-
-void AstExpressionVisitor::VisitWithStatement(WithStatement* stmt) {
-  RECURSE(stmt->expression());
-  RECURSE(stmt->statement());
-}
-
-
-void AstExpressionVisitor::VisitSwitchStatement(SwitchStatement* stmt) {
-  RECURSE(Visit(stmt->tag()));
-
-  ZoneList<CaseClause*>* clauses = stmt->cases();
-
-  for (int i = 0; i < clauses->length(); ++i) {
-    CaseClause* clause = clauses->at(i);
-    if (!clause->is_default()) {
-      Expression* label = clause->label();
-      RECURSE(Visit(label));
-    }
-    ZoneList<Statement*>* stmts = clause->statements();
-    RECURSE(VisitStatements(stmts));
-  }
-}
-
-
-void AstExpressionVisitor::VisitCaseClause(CaseClause* clause) {
-  UNREACHABLE();
-}
-
-
-void AstExpressionVisitor::VisitDoWhileStatement(DoWhileStatement* stmt) {
-  RECURSE(Visit(stmt->body()));
-  RECURSE(Visit(stmt->cond()));
-}
-
-
-void AstExpressionVisitor::VisitWhileStatement(WhileStatement* stmt) {
-  RECURSE(Visit(stmt->cond()));
-  RECURSE(Visit(stmt->body()));
-}
-
-
-void AstExpressionVisitor::VisitForStatement(ForStatement* stmt) {
-  if (stmt->init() != NULL) {
-    RECURSE(Visit(stmt->init()));
-  }
-  if (stmt->cond() != NULL) {
-    RECURSE(Visit(stmt->cond()));
-  }
-  if (stmt->next() != NULL) {
-    RECURSE(Visit(stmt->next()));
-  }
-  RECURSE(Visit(stmt->body()));
-}
-
-
-void AstExpressionVisitor::VisitForInStatement(ForInStatement* stmt) {
-  RECURSE(Visit(stmt->enumerable()));
-  RECURSE(Visit(stmt->body()));
-}
-
-
-void AstExpressionVisitor::VisitForOfStatement(ForOfStatement* stmt) {
-  RECURSE(Visit(stmt->assign_iterator()));
-  RECURSE(Visit(stmt->next_result()));
-  RECURSE(Visit(stmt->result_done()));
-  RECURSE(Visit(stmt->assign_each()));
-  RECURSE(Visit(stmt->body()));
-}
-
-
-void AstExpressionVisitor::VisitTryCatchStatement(TryCatchStatement* stmt) {
-  RECURSE(Visit(stmt->try_block()));
-  RECURSE(Visit(stmt->catch_block()));
-}
-
-
-void AstExpressionVisitor::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
-  RECURSE(Visit(stmt->try_block()));
-  RECURSE(Visit(stmt->finally_block()));
-}
-
-
-void AstExpressionVisitor::VisitDebuggerStatement(DebuggerStatement* stmt) {}
-
+void AstExpressionVisitor::Run() { Visit(root_); }
 
 void AstExpressionVisitor::VisitFunctionLiteral(FunctionLiteral* expr) {
-  Scope* scope = expr->scope();
   VisitExpression(expr);
-  RECURSE_EXPRESSION(VisitDeclarations(scope->declarations()));
-  RECURSE_EXPRESSION(VisitStatements(expr->body()));
+  AstTraversalVisitor::VisitFunctionLiteral(expr);
 }
 
-
 void AstExpressionVisitor::VisitNativeFunctionLiteral(
-    NativeFunctionLiteral* expr) {}
-
+    NativeFunctionLiteral* expr) {
+  AstTraversalVisitor::VisitNativeFunctionLiteral(expr);
+}
 
 void AstExpressionVisitor::VisitDoExpression(DoExpression* expr) {
   VisitExpression(expr);
-  RECURSE(VisitBlock(expr->block()));
-  RECURSE(VisitVariableProxy(expr->result()));
+  AstTraversalVisitor::VisitDoExpression(expr);
 }
 
-
 void AstExpressionVisitor::VisitConditional(Conditional* expr) {
   VisitExpression(expr);
-  RECURSE_EXPRESSION(Visit(expr->condition()));
-  RECURSE_EXPRESSION(Visit(expr->then_expression()));
-  RECURSE_EXPRESSION(Visit(expr->else_expression()));
+  AstTraversalVisitor::VisitConditional(expr);
 }
 
-
 void AstExpressionVisitor::VisitVariableProxy(VariableProxy* expr) {
   VisitExpression(expr);
+  AstTraversalVisitor::VisitVariableProxy(expr);
 }
 
-
 void AstExpressionVisitor::VisitLiteral(Literal* expr) {
   VisitExpression(expr);
+  AstTraversalVisitor::VisitLiteral(expr);
 }
 
-
 void AstExpressionVisitor::VisitRegExpLiteral(RegExpLiteral* expr) {
   VisitExpression(expr);
+  AstTraversalVisitor::VisitRegExpLiteral(expr);
 }
 
-
 void AstExpressionVisitor::VisitObjectLiteral(ObjectLiteral* expr) {
   VisitExpression(expr);
-  ZoneList<ObjectLiteralProperty*>* props = expr->properties();
-  for (int i = 0; i < props->length(); ++i) {
-    ObjectLiteralProperty* prop = props->at(i);
-    if (!prop->key()->IsLiteral()) {
-      RECURSE_EXPRESSION(Visit(prop->key()));
-    }
-    RECURSE_EXPRESSION(Visit(prop->value()));
-  }
+  AstTraversalVisitor::VisitObjectLiteral(expr);
 }
 
-
 void AstExpressionVisitor::VisitArrayLiteral(ArrayLiteral* expr) {
   VisitExpression(expr);
-  ZoneList<Expression*>* values = expr->values();
-  for (int i = 0; i < values->length(); ++i) {
-    Expression* value = values->at(i);
-    RECURSE_EXPRESSION(Visit(value));
-  }
+  AstTraversalVisitor::VisitArrayLiteral(expr);
 }
 
-
 void AstExpressionVisitor::VisitAssignment(Assignment* expr) {
   VisitExpression(expr);
-  RECURSE_EXPRESSION(Visit(expr->target()));
-  RECURSE_EXPRESSION(Visit(expr->value()));
+  AstTraversalVisitor::VisitAssignment(expr);
 }
 
-
 void AstExpressionVisitor::VisitYield(Yield* expr) {
   VisitExpression(expr);
-  RECURSE_EXPRESSION(Visit(expr->generator_object()));
-  RECURSE_EXPRESSION(Visit(expr->expression()));
+  AstTraversalVisitor::VisitYield(expr);
 }
 
-
 void AstExpressionVisitor::VisitThrow(Throw* expr) {
   VisitExpression(expr);
-  RECURSE_EXPRESSION(Visit(expr->exception()));
+  AstTraversalVisitor::VisitThrow(expr);
 }
 
-
 void AstExpressionVisitor::VisitProperty(Property* expr) {
   VisitExpression(expr);
-  RECURSE_EXPRESSION(Visit(expr->obj()));
-  RECURSE_EXPRESSION(Visit(expr->key()));
+  AstTraversalVisitor::VisitProperty(expr);
 }
 
-
 void AstExpressionVisitor::VisitCall(Call* expr) {
   VisitExpression(expr);
-  RECURSE_EXPRESSION(Visit(expr->expression()));
-  ZoneList<Expression*>* args = expr->arguments();
-  for (int i = 0; i < args->length(); ++i) {
-    Expression* arg = args->at(i);
-    RECURSE_EXPRESSION(Visit(arg));
-  }
+  AstTraversalVisitor::VisitCall(expr);
 }
 
-
 void AstExpressionVisitor::VisitCallNew(CallNew* expr) {
   VisitExpression(expr);
-  RECURSE_EXPRESSION(Visit(expr->expression()));
-  ZoneList<Expression*>* args = expr->arguments();
-  for (int i = 0; i < args->length(); ++i) {
-    Expression* arg = args->at(i);
-    RECURSE_EXPRESSION(Visit(arg));
-  }
+  AstTraversalVisitor::VisitCallNew(expr);
 }
 
-
 void AstExpressionVisitor::VisitCallRuntime(CallRuntime* expr) {
   VisitExpression(expr);
-  ZoneList<Expression*>* args = expr->arguments();
-  for (int i = 0; i < args->length(); ++i) {
-    Expression* arg = args->at(i);
-    RECURSE_EXPRESSION(Visit(arg));
-  }
+  AstTraversalVisitor::VisitCallRuntime(expr);
 }
 
-
 void AstExpressionVisitor::VisitUnaryOperation(UnaryOperation* expr) {
   VisitExpression(expr);
-  RECURSE_EXPRESSION(Visit(expr->expression()));
+  AstTraversalVisitor::VisitUnaryOperation(expr);
 }
 
-
 void AstExpressionVisitor::VisitCountOperation(CountOperation* expr) {
   VisitExpression(expr);
-  RECURSE_EXPRESSION(Visit(expr->expression()));
+  AstTraversalVisitor::VisitCountOperation(expr);
 }
 
-
 void AstExpressionVisitor::VisitBinaryOperation(BinaryOperation* expr) {
   VisitExpression(expr);
-  RECURSE_EXPRESSION(Visit(expr->left()));
-  RECURSE_EXPRESSION(Visit(expr->right()));
+  AstTraversalVisitor::VisitBinaryOperation(expr);
 }
 
-
 void AstExpressionVisitor::VisitCompareOperation(CompareOperation* expr) {
   VisitExpression(expr);
-  RECURSE_EXPRESSION(Visit(expr->left()));
-  RECURSE_EXPRESSION(Visit(expr->right()));
+  AstTraversalVisitor::VisitCompareOperation(expr);
 }
 
-
 void AstExpressionVisitor::VisitThisFunction(ThisFunction* expr) {
   VisitExpression(expr);
+  AstTraversalVisitor::VisitThisFunction(expr);
 }
 
-
-void AstExpressionVisitor::VisitDeclarations(ZoneList<Declaration*>* decls) {
-  for (int i = 0; i < decls->length(); ++i) {
-    Declaration* decl = decls->at(i);
-    RECURSE(Visit(decl));
-  }
-}
-
-
 void AstExpressionVisitor::VisitClassLiteral(ClassLiteral* expr) {
   VisitExpression(expr);
-  if (expr->extends() != nullptr) {
-    RECURSE_EXPRESSION(Visit(expr->extends()));
-  }
-  RECURSE_EXPRESSION(Visit(expr->constructor()));
-  ZoneList<ObjectLiteralProperty*>* props = expr->properties();
-  for (int i = 0; i < props->length(); ++i) {
-    ObjectLiteralProperty* prop = props->at(i);
-    if (!prop->key()->IsLiteral()) {
-      RECURSE_EXPRESSION(Visit(prop->key()));
-    }
-    RECURSE_EXPRESSION(Visit(prop->value()));
-  }
+  AstTraversalVisitor::VisitClassLiteral(expr);
 }
 
-
 void AstExpressionVisitor::VisitSpread(Spread* expr) {
   VisitExpression(expr);
-  RECURSE_EXPRESSION(Visit(expr->expression()));
+  AstTraversalVisitor::VisitSpread(expr);
 }
 
-
-void AstExpressionVisitor::VisitEmptyParentheses(EmptyParentheses* expr) {}
-
-
 void AstExpressionVisitor::VisitSuperPropertyReference(
     SuperPropertyReference* expr) {
   VisitExpression(expr);
-  RECURSE_EXPRESSION(VisitVariableProxy(expr->this_var()));
-  RECURSE_EXPRESSION(Visit(expr->home_object()));
+  AstTraversalVisitor::VisitSuperPropertyReference(expr);
 }
 
-
 void AstExpressionVisitor::VisitSuperCallReference(SuperCallReference* expr) {
   VisitExpression(expr);
-  RECURSE_EXPRESSION(VisitVariableProxy(expr->this_var()));
-  RECURSE_EXPRESSION(VisitVariableProxy(expr->new_target_var()));
-  RECURSE_EXPRESSION(VisitVariableProxy(expr->this_function_var()));
+  AstTraversalVisitor::VisitSuperCallReference(expr);
 }
 
+void AstExpressionVisitor::VisitCaseClause(CaseClause* expr) {
+  AstTraversalVisitor::VisitCaseClause(expr);
+}
+
+void AstExpressionVisitor::VisitEmptyParentheses(EmptyParentheses* expr) {
+  AstTraversalVisitor::VisitEmptyParentheses(expr);
+}
 
 void AstExpressionVisitor::VisitRewritableExpression(
     RewritableExpression* expr) {
   VisitExpression(expr);
-  RECURSE(Visit(expr->expression()));
+  AstTraversalVisitor::VisitRewritableExpression(expr);
 }
 
 
diff --git a/src/ast/ast-expression-visitor.h b/src/ast/ast-expression-visitor.h
index 283bc7b..3f7b9f7 100644
--- a/src/ast/ast-expression-visitor.h
+++ b/src/ast/ast-expression-visitor.h
@@ -17,7 +17,7 @@
 // A Visitor over a CompilationInfo's AST that invokes
 // VisitExpression on each expression node.
 
-class AstExpressionVisitor : public AstVisitor {
+class AstExpressionVisitor : public AstTraversalVisitor {
  public:
   AstExpressionVisitor(Isolate* isolate, Expression* root);
   AstExpressionVisitor(uintptr_t stack_limit, Expression* root);
@@ -25,20 +25,13 @@
 
  protected:
   virtual void VisitExpression(Expression* expression) = 0;
-  int depth() { return depth_; }
-
-  void VisitDeclarations(ZoneList<Declaration*>* d) override;
-  void VisitStatements(ZoneList<Statement*>* s) override;
 
  private:
-  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
 #define DECLARE_VISIT(type) void Visit##type(type* node) override;
-  AST_NODE_LIST(DECLARE_VISIT)
+  EXPRESSION_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
 
   Expression* root_;
-  int depth_;
 
   DISALLOW_COPY_AND_ASSIGN(AstExpressionVisitor);
 };
diff --git a/src/ast/ast-numbering.cc b/src/ast/ast-numbering.cc
index bd96026..dd6ce4a 100644
--- a/src/ast/ast-numbering.cc
+++ b/src/ast/ast-numbering.cc
@@ -220,7 +220,6 @@
   node->set_yield_id(yield_count_);
   yield_count_++;
   IncrementNodeCount();
-  DisableOptimization(kYield);
   ReserveFeedbackSlots(node);
   node->set_base_id(ReserveIdRange(Yield::num_ids()));
   Visit(node->generator_object());
@@ -306,7 +305,7 @@
 
 void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
   IncrementNodeCount();
-  DisableOptimization(kTryCatchStatement);
+  DisableCrankshaft(kTryCatchStatement);
   Visit(node->try_block());
   Visit(node->catch_block());
 }
@@ -314,7 +313,7 @@
 
 void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
   IncrementNodeCount();
-  DisableOptimization(kTryFinallyStatement);
+  DisableCrankshaft(kTryFinallyStatement);
   Visit(node->try_block());
   Visit(node->finally_block());
 }
@@ -579,6 +578,13 @@
     DisableCrankshaft(kRestParameter);
   }
 
+  if (IsGeneratorFunction(node->kind()) || IsAsyncFunction(node->kind())) {
+    // TODO(neis): We may want to allow Turbofan optimization here if
+    // --turbo-from-bytecode is set and we know that Ignition is used.
+    // Unfortunately we can't express that here.
+    DisableOptimization(kGenerator);
+  }
+
   VisitDeclarations(scope->declarations());
   VisitStatements(node->body());
 
diff --git a/src/ast/ast-value-factory.cc b/src/ast/ast-value-factory.cc
index 189d4cc..92322a0 100644
--- a/src/ast/ast-value-factory.cc
+++ b/src/ast/ast-value-factory.cc
@@ -360,7 +360,7 @@
   // against the AstRawStrings which are in the string_table_. We should not
   // return this AstRawString.
   AstRawString key(is_one_byte, literal_bytes, hash);
-  HashMap::Entry* entry = string_table_.LookupOrInsert(&key, hash);
+  base::HashMap::Entry* entry = string_table_.LookupOrInsert(&key, hash);
   if (entry->value == NULL) {
     // Copy literal contents for later comparison.
     int length = literal_bytes.length();
diff --git a/src/ast/ast-value-factory.h b/src/ast/ast-value-factory.h
index 041581b..b0019a5 100644
--- a/src/ast/ast-value-factory.h
+++ b/src/ast/ast-value-factory.h
@@ -29,7 +29,7 @@
 #define V8_AST_AST_VALUE_FACTORY_H_
 
 #include "src/api.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
 #include "src/utils.h"
 
 // AstString, AstValue and AstValueFactory are for storing strings and values
@@ -265,7 +265,6 @@
   F(next, "next")                               \
   F(proto, "__proto__")                         \
   F(prototype, "prototype")                     \
-  F(rest_parameter, ".rest_parameter")          \
   F(return, "return")                           \
   F(set_space, "set ")                          \
   F(this, "this")                               \
@@ -352,7 +351,7 @@
   static bool AstRawStringCompare(void* a, void* b);
 
   // All strings are copied here, one after another (no NULLs inbetween).
-  HashMap string_table_;
+  base::HashMap string_table_;
   // For keeping track of all AstValues and AstRawStrings we've created (so that
   // they can be internalized later).
   List<AstValue*> values_;
diff --git a/src/ast/ast.cc b/src/ast/ast.cc
index 7c83e3b..4ad4585 100644
--- a/src/ast/ast.cc
+++ b/src/ast/ast.cc
@@ -8,14 +8,14 @@
 
 #include "src/ast/prettyprinter.h"
 #include "src/ast/scopes.h"
+#include "src/base/hashmap.h"
 #include "src/builtins.h"
 #include "src/code-stubs.h"
 #include "src/contexts.h"
 #include "src/conversions.h"
-#include "src/hashmap.h"
 #include "src/parsing/parser.h"
-#include "src/property.h"
 #include "src/property-details.h"
+#include "src/property.h"
 #include "src/string-stream.h"
 #include "src/type-info.h"
 
@@ -59,12 +59,19 @@
 
 
 bool Expression::IsNullLiteral() const {
-  return IsLiteral() && AsLiteral()->value()->IsNull();
+  if (!IsLiteral()) return false;
+  Handle<Object> value = AsLiteral()->value();
+  return !value->IsSmi() &&
+         value->IsNull(HeapObject::cast(*value)->GetIsolate());
 }
 
 bool Expression::IsUndefinedLiteral() const {
-  if (IsLiteral() && AsLiteral()->value()->IsUndefined()) {
-    return true;
+  if (IsLiteral()) {
+    Handle<Object> value = AsLiteral()->value();
+    if (!value->IsSmi() &&
+        value->IsUndefined(HeapObject::cast(*value)->GetIsolate())) {
+      return true;
+    }
   }
 
   const VariableProxy* var_proxy = AsVariableProxy();
@@ -120,17 +127,17 @@
   if (UsesVariableFeedbackSlot()) {
     // VariableProxies that point to the same Variable within a function can
     // make their loads from the same IC slot.
-    if (var()->IsUnallocated()) {
+    if (var()->IsUnallocated() || var()->mode() == DYNAMIC_GLOBAL) {
       ZoneHashMap::Entry* entry = cache->Get(var());
       if (entry != NULL) {
         variable_feedback_slot_ = FeedbackVectorSlot(
             static_cast<int>(reinterpret_cast<intptr_t>(entry->value)));
         return;
       }
-    }
-    variable_feedback_slot_ = spec->AddLoadICSlot();
-    if (var()->IsUnallocated()) {
+      variable_feedback_slot_ = spec->AddLoadGlobalICSlot(var()->name());
       cache->Put(var(), variable_feedback_slot_);
+    } else {
+      variable_feedback_slot_ = spec->AddLoadICSlot();
     }
   }
 }
@@ -387,7 +394,7 @@
     if (property->is_computed_name()) continue;
     if (property->kind() == ObjectLiteral::Property::PROTOTYPE) continue;
     Literal* literal = property->key()->AsLiteral();
-    DCHECK(!literal->value()->IsNull());
+    DCHECK(!literal->IsNullLiteral());
 
     // If there is an existing entry do not emit a store unless the previous
     // entry was also an accessor.
@@ -457,11 +464,11 @@
     // (value->IsNumber()).
     // TODO(verwaest): Remove once we can store them inline.
     if (FLAG_track_double_fields &&
-        (value->IsNumber() || value->IsUninitialized())) {
+        (value->IsNumber() || value->IsUninitialized(isolate))) {
       may_store_doubles_ = true;
     }
 
-    is_simple = is_simple && !value->IsUninitialized();
+    is_simple = is_simple && !value->IsUninitialized(isolate);
 
     // Keep track of the number of elements in the object literal and
     // the largest element index.  If the largest element index is
@@ -524,12 +531,12 @@
     // New handle scope here, needs to be after BuildContants().
     HandleScope scope(isolate);
     Handle<Object> boilerplate_value = GetBoilerplateValue(element, isolate);
-    if (boilerplate_value->IsTheHole()) {
+    if (boilerplate_value->IsTheHole(isolate)) {
       is_holey = true;
       continue;
     }
 
-    if (boilerplate_value->IsUninitialized()) {
+    if (boilerplate_value->IsUninitialized(isolate)) {
       boilerplate_value = handle(Smi::FromInt(0), isolate);
       is_simple = false;
     }
@@ -816,54 +823,74 @@
 // ----------------------------------------------------------------------------
 // Implementation of AstTraversalVisitor
 
-AstTraversalVisitor::AstTraversalVisitor(Isolate* isolate) {
+#define RECURSE(call)               \
+  do {                              \
+    DCHECK(!HasStackOverflow());    \
+    call;                           \
+    if (HasStackOverflow()) return; \
+  } while (false)
+
+#define RECURSE_EXPRESSION(call)    \
+  do {                              \
+    DCHECK(!HasStackOverflow());    \
+    ++depth_;                       \
+    call;                           \
+    --depth_;                       \
+    if (HasStackOverflow()) return; \
+  } while (false)
+
+AstTraversalVisitor::AstTraversalVisitor(Isolate* isolate) : depth_(0) {
   InitializeAstVisitor(isolate);
 }
 
+AstTraversalVisitor::AstTraversalVisitor(uintptr_t stack_limit) : depth_(0) {
+  InitializeAstVisitor(stack_limit);
+}
+
+void AstTraversalVisitor::VisitDeclarations(ZoneList<Declaration*>* decls) {
+  for (int i = 0; i < decls->length(); ++i) {
+    Declaration* decl = decls->at(i);
+    RECURSE(Visit(decl));
+  }
+}
+
+void AstTraversalVisitor::VisitStatements(ZoneList<Statement*>* stmts) {
+  for (int i = 0; i < stmts->length(); ++i) {
+    Statement* stmt = stmts->at(i);
+    RECURSE(Visit(stmt));
+    if (stmt->IsJump()) break;
+  }
+}
+
 void AstTraversalVisitor::VisitVariableDeclaration(VariableDeclaration* decl) {}
 
 void AstTraversalVisitor::VisitFunctionDeclaration(FunctionDeclaration* decl) {
-  Visit(decl->fun());
+  RECURSE(Visit(decl->fun()));
 }
 
 void AstTraversalVisitor::VisitImportDeclaration(ImportDeclaration* decl) {}
 
 void AstTraversalVisitor::VisitExportDeclaration(ExportDeclaration* decl) {}
 
-void AstTraversalVisitor::VisitStatements(ZoneList<Statement*>* stmts) {
-  for (int i = 0; i < stmts->length(); ++i) {
-    Statement* stmt = stmts->at(i);
-    Visit(stmt);
-    if (stmt->IsJump()) break;
-  }
-}
-
-void AstTraversalVisitor::VisitExpressions(ZoneList<Expression*>* expressions) {
-  for (int i = 0; i < expressions->length(); i++) {
-    Expression* expression = expressions->at(i);
-    if (expression != NULL) Visit(expression);
-  }
-}
-
 void AstTraversalVisitor::VisitBlock(Block* stmt) {
-  VisitStatements(stmt->statements());
+  RECURSE(VisitStatements(stmt->statements()));
 }
 
 void AstTraversalVisitor::VisitExpressionStatement(ExpressionStatement* stmt) {
-  Visit(stmt->expression());
+  RECURSE(Visit(stmt->expression()));
 }
 
 void AstTraversalVisitor::VisitEmptyStatement(EmptyStatement* stmt) {}
 
 void AstTraversalVisitor::VisitSloppyBlockFunctionStatement(
     SloppyBlockFunctionStatement* stmt) {
-  Visit(stmt->statement());
+  RECURSE(Visit(stmt->statement()));
 }
 
 void AstTraversalVisitor::VisitIfStatement(IfStatement* stmt) {
-  Visit(stmt->condition());
-  Visit(stmt->then_statement());
-  Visit(stmt->else_statement());
+  RECURSE(Visit(stmt->condition()));
+  RECURSE(Visit(stmt->then_statement()));
+  RECURSE(Visit(stmt->else_statement()));
 }
 
 void AstTraversalVisitor::VisitContinueStatement(ContinueStatement* stmt) {}
@@ -871,16 +898,16 @@
 void AstTraversalVisitor::VisitBreakStatement(BreakStatement* stmt) {}
 
 void AstTraversalVisitor::VisitReturnStatement(ReturnStatement* stmt) {
-  Visit(stmt->expression());
+  RECURSE(Visit(stmt->expression()));
 }
 
 void AstTraversalVisitor::VisitWithStatement(WithStatement* stmt) {
-  stmt->expression();
-  stmt->statement();
+  RECURSE(stmt->expression());
+  RECURSE(stmt->statement());
 }
 
 void AstTraversalVisitor::VisitSwitchStatement(SwitchStatement* stmt) {
-  Visit(stmt->tag());
+  RECURSE(Visit(stmt->tag()));
 
   ZoneList<CaseClause*>* clauses = stmt->cases();
 
@@ -888,81 +915,81 @@
     CaseClause* clause = clauses->at(i);
     if (!clause->is_default()) {
       Expression* label = clause->label();
-      Visit(label);
+      RECURSE(Visit(label));
     }
     ZoneList<Statement*>* stmts = clause->statements();
-    VisitStatements(stmts);
+    RECURSE(VisitStatements(stmts));
   }
 }
 
 void AstTraversalVisitor::VisitCaseClause(CaseClause* clause) { UNREACHABLE(); }
 
 void AstTraversalVisitor::VisitDoWhileStatement(DoWhileStatement* stmt) {
-  Visit(stmt->body());
-  Visit(stmt->cond());
+  RECURSE(Visit(stmt->body()));
+  RECURSE(Visit(stmt->cond()));
 }
 
 void AstTraversalVisitor::VisitWhileStatement(WhileStatement* stmt) {
-  Visit(stmt->cond());
-  Visit(stmt->body());
+  RECURSE(Visit(stmt->cond()));
+  RECURSE(Visit(stmt->body()));
 }
 
 void AstTraversalVisitor::VisitForStatement(ForStatement* stmt) {
   if (stmt->init() != NULL) {
-    Visit(stmt->init());
+    RECURSE(Visit(stmt->init()));
   }
   if (stmt->cond() != NULL) {
-    Visit(stmt->cond());
+    RECURSE(Visit(stmt->cond()));
   }
   if (stmt->next() != NULL) {
-    Visit(stmt->next());
+    RECURSE(Visit(stmt->next()));
   }
-  Visit(stmt->body());
+  RECURSE(Visit(stmt->body()));
 }
 
 void AstTraversalVisitor::VisitForInStatement(ForInStatement* stmt) {
-  Visit(stmt->enumerable());
-  Visit(stmt->body());
+  RECURSE(Visit(stmt->enumerable()));
+  RECURSE(Visit(stmt->body()));
 }
 
 void AstTraversalVisitor::VisitForOfStatement(ForOfStatement* stmt) {
-  Visit(stmt->assign_iterator());
-  Visit(stmt->next_result());
-  Visit(stmt->result_done());
-  Visit(stmt->assign_each());
-  Visit(stmt->body());
+  RECURSE(Visit(stmt->assign_iterator()));
+  RECURSE(Visit(stmt->next_result()));
+  RECURSE(Visit(stmt->result_done()));
+  RECURSE(Visit(stmt->assign_each()));
+  RECURSE(Visit(stmt->body()));
 }
 
 void AstTraversalVisitor::VisitTryCatchStatement(TryCatchStatement* stmt) {
-  Visit(stmt->try_block());
-  Visit(stmt->catch_block());
+  RECURSE(Visit(stmt->try_block()));
+  RECURSE(Visit(stmt->catch_block()));
 }
 
 void AstTraversalVisitor::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
-  Visit(stmt->try_block());
-  Visit(stmt->finally_block());
+  RECURSE(Visit(stmt->try_block()));
+  RECURSE(Visit(stmt->finally_block()));
 }
 
 void AstTraversalVisitor::VisitDebuggerStatement(DebuggerStatement* stmt) {}
 
 void AstTraversalVisitor::VisitFunctionLiteral(FunctionLiteral* expr) {
   Scope* scope = expr->scope();
-  VisitDeclarations(scope->declarations());
-  VisitStatements(expr->body());
+  RECURSE_EXPRESSION(VisitDeclarations(scope->declarations()));
+  RECURSE_EXPRESSION(VisitStatements(expr->body()));
 }
 
 void AstTraversalVisitor::VisitNativeFunctionLiteral(
     NativeFunctionLiteral* expr) {}
 
 void AstTraversalVisitor::VisitDoExpression(DoExpression* expr) {
-  VisitBlock(expr->block());
-  VisitVariableProxy(expr->result());
+  RECURSE(VisitBlock(expr->block()));
+  RECURSE(VisitVariableProxy(expr->result()));
 }
 
 void AstTraversalVisitor::VisitConditional(Conditional* expr) {
-  Visit(expr->condition());
-  Visit(expr->then_expression());
-  Visit(expr->else_expression());
+  RECURSE_EXPRESSION(Visit(expr->condition()));
+  RECURSE_EXPRESSION(Visit(expr->then_expression()));
+  RECURSE_EXPRESSION(Visit(expr->else_expression()));
 }
 
 void AstTraversalVisitor::VisitVariableProxy(VariableProxy* expr) {}
@@ -976,9 +1003,9 @@
   for (int i = 0; i < props->length(); ++i) {
     ObjectLiteralProperty* prop = props->at(i);
     if (!prop->key()->IsLiteral()) {
-      Visit(prop->key());
+      RECURSE_EXPRESSION(Visit(prop->key()));
     }
-    Visit(prop->value());
+    RECURSE_EXPRESSION(Visit(prop->value()));
   }
 }
 
@@ -986,42 +1013,44 @@
   ZoneList<Expression*>* values = expr->values();
   for (int i = 0; i < values->length(); ++i) {
     Expression* value = values->at(i);
-    Visit(value);
+    RECURSE_EXPRESSION(Visit(value));
   }
 }
 
 void AstTraversalVisitor::VisitAssignment(Assignment* expr) {
-  Visit(expr->target());
-  Visit(expr->value());
+  RECURSE_EXPRESSION(Visit(expr->target()));
+  RECURSE_EXPRESSION(Visit(expr->value()));
 }
 
 void AstTraversalVisitor::VisitYield(Yield* expr) {
-  Visit(expr->generator_object());
-  Visit(expr->expression());
+  RECURSE_EXPRESSION(Visit(expr->generator_object()));
+  RECURSE_EXPRESSION(Visit(expr->expression()));
 }
 
-void AstTraversalVisitor::VisitThrow(Throw* expr) { Visit(expr->exception()); }
+void AstTraversalVisitor::VisitThrow(Throw* expr) {
+  RECURSE_EXPRESSION(Visit(expr->exception()));
+}
 
 void AstTraversalVisitor::VisitProperty(Property* expr) {
-  Visit(expr->obj());
-  Visit(expr->key());
+  RECURSE_EXPRESSION(Visit(expr->obj()));
+  RECURSE_EXPRESSION(Visit(expr->key()));
 }
 
 void AstTraversalVisitor::VisitCall(Call* expr) {
-  Visit(expr->expression());
+  RECURSE_EXPRESSION(Visit(expr->expression()));
   ZoneList<Expression*>* args = expr->arguments();
   for (int i = 0; i < args->length(); ++i) {
     Expression* arg = args->at(i);
-    Visit(arg);
+    RECURSE_EXPRESSION(Visit(arg));
   }
 }
 
 void AstTraversalVisitor::VisitCallNew(CallNew* expr) {
-  Visit(expr->expression());
+  RECURSE_EXPRESSION(Visit(expr->expression()));
   ZoneList<Expression*>* args = expr->arguments();
   for (int i = 0; i < args->length(); ++i) {
     Expression* arg = args->at(i);
-    Visit(arg);
+    RECURSE_EXPRESSION(Visit(arg));
   }
 }
 
@@ -1029,75 +1058,71 @@
   ZoneList<Expression*>* args = expr->arguments();
   for (int i = 0; i < args->length(); ++i) {
     Expression* arg = args->at(i);
-    Visit(arg);
+    RECURSE_EXPRESSION(Visit(arg));
   }
 }
 
 void AstTraversalVisitor::VisitUnaryOperation(UnaryOperation* expr) {
-  Visit(expr->expression());
+  RECURSE_EXPRESSION(Visit(expr->expression()));
 }
 
 void AstTraversalVisitor::VisitCountOperation(CountOperation* expr) {
-  Visit(expr->expression());
+  RECURSE_EXPRESSION(Visit(expr->expression()));
 }
 
 void AstTraversalVisitor::VisitBinaryOperation(BinaryOperation* expr) {
-  Visit(expr->left());
-  Visit(expr->right());
+  RECURSE_EXPRESSION(Visit(expr->left()));
+  RECURSE_EXPRESSION(Visit(expr->right()));
 }
 
 void AstTraversalVisitor::VisitCompareOperation(CompareOperation* expr) {
-  Visit(expr->left());
-  Visit(expr->right());
+  RECURSE_EXPRESSION(Visit(expr->left()));
+  RECURSE_EXPRESSION(Visit(expr->right()));
 }
 
 void AstTraversalVisitor::VisitThisFunction(ThisFunction* expr) {}
 
-void AstTraversalVisitor::VisitDeclarations(ZoneList<Declaration*>* decls) {
-  for (int i = 0; i < decls->length(); ++i) {
-    Declaration* decl = decls->at(i);
-    Visit(decl);
-  }
-}
-
 void AstTraversalVisitor::VisitClassLiteral(ClassLiteral* expr) {
   if (expr->extends() != nullptr) {
-    Visit(expr->extends());
+    RECURSE_EXPRESSION(Visit(expr->extends()));
   }
-  Visit(expr->constructor());
+  RECURSE_EXPRESSION(Visit(expr->constructor()));
   ZoneList<ObjectLiteralProperty*>* props = expr->properties();
   for (int i = 0; i < props->length(); ++i) {
     ObjectLiteralProperty* prop = props->at(i);
     if (!prop->key()->IsLiteral()) {
-      Visit(prop->key());
+      RECURSE_EXPRESSION(Visit(prop->key()));
     }
-    Visit(prop->value());
+    RECURSE_EXPRESSION(Visit(prop->value()));
   }
 }
 
 void AstTraversalVisitor::VisitSpread(Spread* expr) {
-  Visit(expr->expression());
+  RECURSE_EXPRESSION(Visit(expr->expression()));
 }
 
 void AstTraversalVisitor::VisitEmptyParentheses(EmptyParentheses* expr) {}
 
 void AstTraversalVisitor::VisitSuperPropertyReference(
     SuperPropertyReference* expr) {
-  VisitVariableProxy(expr->this_var());
-  Visit(expr->home_object());
+  RECURSE_EXPRESSION(VisitVariableProxy(expr->this_var()));
+  RECURSE_EXPRESSION(Visit(expr->home_object()));
 }
 
 void AstTraversalVisitor::VisitSuperCallReference(SuperCallReference* expr) {
-  VisitVariableProxy(expr->this_var());
-  VisitVariableProxy(expr->new_target_var());
-  VisitVariableProxy(expr->this_function_var());
+  RECURSE_EXPRESSION(VisitVariableProxy(expr->this_var()));
+  RECURSE_EXPRESSION(VisitVariableProxy(expr->new_target_var()));
+  RECURSE_EXPRESSION(VisitVariableProxy(expr->this_function_var()));
 }
 
 void AstTraversalVisitor::VisitRewritableExpression(
     RewritableExpression* expr) {
-  Visit(expr->expression());
+  RECURSE(Visit(expr->expression()));
 }
 
+#undef RECURSE_EXPRESSION
+#undef RECURSE
+
 CaseClause::CaseClause(Zone* zone, Expression* label,
                        ZoneList<Statement*>* statements, int pos)
     : Expression(zone, pos),
diff --git a/src/ast/ast.h b/src/ast/ast.h
index bee0bab..5ae90f8 100644
--- a/src/ast/ast.h
+++ b/src/ast/ast.h
@@ -130,7 +130,8 @@
  public:
   explicit FeedbackVectorSlotCache(Zone* zone)
       : zone_(zone),
-        hash_map_(HashMap::PointersMatch, ZoneHashMap::kDefaultHashMapCapacity,
+        hash_map_(base::HashMap::PointersMatch,
+                  ZoneHashMap::kDefaultHashMapCapacity,
                   ZoneAllocationPolicy(zone)) {}
 
   void Put(Variable* variable, FeedbackVectorSlot slot) {
@@ -1501,9 +1502,10 @@
   };
 
   struct Accessors: public ZoneObject {
-    Accessors() : getter(NULL), setter(NULL) {}
+    Accessors() : getter(NULL), setter(NULL), bailout_id(BailoutId::None()) {}
     ObjectLiteralProperty* getter;
     ObjectLiteralProperty* setter;
+    BailoutId bailout_id;
   };
 
   BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
@@ -1551,13 +1553,14 @@
 
 
 // A map from property names to getter/setter pairs allocated in the zone.
-class AccessorTable : public TemplateHashMap<Literal, ObjectLiteral::Accessors,
-                                             ZoneAllocationPolicy> {
+class AccessorTable
+    : public base::TemplateHashMap<Literal, ObjectLiteral::Accessors,
+                                   ZoneAllocationPolicy> {
  public:
   explicit AccessorTable(Zone* zone)
-      : TemplateHashMap<Literal, ObjectLiteral::Accessors,
-                        ZoneAllocationPolicy>(Literal::Match,
-                                              ZoneAllocationPolicy(zone)),
+      : base::TemplateHashMap<Literal, ObjectLiteral::Accessors,
+                              ZoneAllocationPolicy>(Literal::Match,
+                                                    ZoneAllocationPolicy(zone)),
         zone_(zone) {}
 
   Iterator lookup(Literal* literal) {
@@ -2004,6 +2007,9 @@
   void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
                                  FeedbackVectorSlotCache* cache) override {
     callnew_feedback_slot_ = spec->AddGeneralSlot();
+    // Construct calls have two slots, one right after the other.
+    // The second slot stores the call count for monomorphic calls.
+    spec->AddGeneralSlot();
   }
 
   FeedbackVectorSlot CallNewFeedbackSlot() {
@@ -3053,20 +3059,26 @@
 class AstTraversalVisitor : public AstVisitor {
  public:
   explicit AstTraversalVisitor(Isolate* isolate);
+  explicit AstTraversalVisitor(uintptr_t stack_limit);
   virtual ~AstTraversalVisitor() {}
 
   // Iteration left-to-right.
   void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
   void VisitStatements(ZoneList<Statement*>* statements) override;
-  void VisitExpressions(ZoneList<Expression*>* expressions) override;
 
 // Individual nodes
 #define DECLARE_VISIT(type) void Visit##type(type* node) override;
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
 
+ protected:
+  int depth() { return depth_; }
+
  private:
   DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
+  int depth_;
+
   DISALLOW_COPY_AND_ASSIGN(AstTraversalVisitor);
 };
 
diff --git a/src/ast/prettyprinter.cc b/src/ast/prettyprinter.cc
index 49bff08..d1673c3 100644
--- a/src/ast/prettyprinter.cc
+++ b/src/ast/prettyprinter.cc
@@ -14,6 +14,7 @@
 namespace internal {
 
 CallPrinter::CallPrinter(Isolate* isolate, bool is_builtin) {
+  isolate_ = isolate;
   output_ = NULL;
   size_ = 0;
   pos_ = 0;
@@ -440,13 +441,13 @@
     if (quote) Print("\"");
     Print("%s", String::cast(object)->ToCString().get());
     if (quote) Print("\"");
-  } else if (object->IsNull()) {
+  } else if (object->IsNull(isolate_)) {
     Print("null");
-  } else if (object->IsTrue()) {
+  } else if (object->IsTrue(isolate_)) {
     Print("true");
-  } else if (object->IsFalse()) {
+  } else if (object->IsFalse(isolate_)) {
     Print("false");
-  } else if (object->IsUndefined()) {
+  } else if (object->IsUndefined(isolate_)) {
     Print("undefined");
   } else if (object->IsNumber()) {
     Print("%g", object->Number());
@@ -479,6 +480,7 @@
 
 
 PrettyPrinter::PrettyPrinter(Isolate* isolate) {
+  isolate_ = isolate;
   output_ = NULL;
   size_ = 0;
   pos_ = 0;
@@ -1067,13 +1069,13 @@
       Print("%c", string->Get(i));
     }
     if (quote) Print("\"");
-  } else if (object->IsNull()) {
+  } else if (object->IsNull(isolate_)) {
     Print("null");
-  } else if (object->IsTrue()) {
+  } else if (object->IsTrue(isolate_)) {
     Print("true");
-  } else if (object->IsFalse()) {
+  } else if (object->IsFalse(isolate_)) {
     Print("false");
-  } else if (object->IsUndefined()) {
+  } else if (object->IsUndefined(isolate_)) {
     Print("undefined");
   } else if (object->IsNumber()) {
     Print("%g", object->Number());
@@ -1092,7 +1094,7 @@
   } else if (object->IsFixedArray()) {
     Print("FixedArray");
   } else {
-    Print("<unknown literal %p>", object);
+    Print("<unknown literal %p>", static_cast<void*>(object));
   }
 }
 
diff --git a/src/ast/prettyprinter.h b/src/ast/prettyprinter.h
index 4e90294..bb36c2b 100644
--- a/src/ast/prettyprinter.h
+++ b/src/ast/prettyprinter.h
@@ -32,6 +32,7 @@
 
  private:
   void Init();
+  Isolate* isolate_;
   char* output_;  // output string buffer
   int size_;      // output_ size
   int pos_;       // current printing position
@@ -74,6 +75,7 @@
 #undef DECLARE_VISIT
 
  private:
+  Isolate* isolate_;
   char* output_;  // output string buffer
   int size_;  // output_ size
   int pos_;  // current printing position
diff --git a/src/ast/scopes.cc b/src/ast/scopes.cc
index beffa53..77c5d62 100644
--- a/src/ast/scopes.cc
+++ b/src/ast/scopes.cc
@@ -560,6 +560,11 @@
 
 int Scope::RemoveTemporary(Variable* var) {
   DCHECK_NOT_NULL(var);
+  // Temporaries are only placed in ClosureScopes.
+  DCHECK_EQ(ClosureScope(), this);
+  DCHECK_EQ(var->scope()->ClosureScope(), var->scope());
+  // If the temporary is not here, return quickly.
+  if (var->scope() != this) return -1;
   // Most likely (always?) any temporary variable we want to remove
   // was just added before, so we search backwards.
   for (int i = temps_.length(); i-- > 0;) {
@@ -816,21 +821,6 @@
 }
 
 
-void Scope::ReportMessage(int start_position, int end_position,
-                          MessageTemplate::Template message,
-                          const AstRawString* arg) {
-  // Propagate the error to the topmost scope targeted by this scope analysis
-  // phase.
-  Scope* top = this;
-  while (!top->is_script_scope() && !top->outer_scope()->already_resolved()) {
-    top = top->outer_scope();
-  }
-
-  top->pending_error_handler_.ReportMessageAt(start_position, end_position,
-                                              message, arg, kReferenceError);
-}
-
-
 #ifdef DEBUG
 static const char* Header(ScopeType scope_type, FunctionKind function_kind,
                           bool is_declaration_scope) {
@@ -838,7 +828,10 @@
     case EVAL_SCOPE: return "eval";
     // TODO(adamk): Should we print concise method scopes specially?
     case FUNCTION_SCOPE:
-      return IsArrowFunction(function_kind) ? "arrow" : "function";
+      if (IsGeneratorFunction(function_kind)) return "function*";
+      if (IsAsyncFunction(function_kind)) return "async function";
+      if (IsArrowFunction(function_kind)) return "arrow";
+      return "function";
     case MODULE_SCOPE: return "module";
     case SCRIPT_SCOPE: return "global";
     case CATCH_SCOPE: return "catch";
diff --git a/src/ast/scopes.h b/src/ast/scopes.h
index d767a33..e420cfe 100644
--- a/src/ast/scopes.h
+++ b/src/ast/scopes.h
@@ -6,7 +6,7 @@
 #define V8_AST_SCOPES_H_
 
 #include "src/ast/ast.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
 #include "src/pending-compilation-error-handler.h"
 #include "src/zone.h"
 
@@ -216,7 +216,11 @@
   // Adds a temporary variable in this scope's TemporaryScope. This is for
   // adjusting the scope of temporaries used when desugaring parameter
   // initializers.
-  void AddTemporary(Variable* var) { temps_.Add(var, zone()); }
+  void AddTemporary(Variable* var) {
+    // Temporaries are only placed in ClosureScopes.
+    DCHECK_EQ(ClosureScope(), this);
+    temps_.Add(var, zone());
+  }
 
   // Adds the specific declaration node to the list of declarations in
   // this scope. The declarations are processed as part of entering
@@ -571,11 +575,6 @@
     return &sloppy_block_function_map_;
   }
 
-  // Error handling.
-  void ReportMessage(int start_position, int end_position,
-                     MessageTemplate::Template message,
-                     const AstRawString* arg);
-
   // ---------------------------------------------------------------------------
   // Debugging.
 
diff --git a/src/ast/variables.cc b/src/ast/variables.cc
index 2950db4..9048f79 100644
--- a/src/ast/variables.cc
+++ b/src/ast/variables.cc
@@ -40,7 +40,6 @@
       index_(-1),
       initializer_position_(RelocInfo::kNoPosition),
       local_if_not_shadowed_(NULL),
-      is_from_eval_(false),
       force_context_allocation_(false),
       is_used_(false),
       initialization_flag_(initialization_flag),
diff --git a/src/ast/variables.h b/src/ast/variables.h
index b8bb07e..7d54bc0 100644
--- a/src/ast/variables.h
+++ b/src/ast/variables.h
@@ -119,21 +119,8 @@
     index_ = index;
   }
 
-  void SetFromEval() { is_from_eval_ = true; }
-
   static int CompareIndex(Variable* const* v, Variable* const* w);
 
-  PropertyAttributes DeclarationPropertyAttributes() const {
-    int property_attributes = NONE;
-    if (IsImmutableVariableMode(mode_)) {
-      property_attributes |= READ_ONLY;
-    }
-    if (is_from_eval_) {
-      property_attributes |= EVAL_DECLARED;
-    }
-    return static_cast<PropertyAttributes>(property_attributes);
-  }
-
  private:
   Scope* scope_;
   const AstRawString* name_;
@@ -149,9 +136,6 @@
   // binding scope (exclusive).
   Variable* local_if_not_shadowed_;
 
-  // True if this variable is introduced by a sloppy eval
-  bool is_from_eval_;
-
   // Usage info.
   bool force_context_allocation_;  // set by variable resolver
   bool is_used_;
diff --git a/src/bailout-reason.h b/src/bailout-reason.h
index c44ad85..e47a93c 100644
--- a/src/bailout-reason.h
+++ b/src/bailout-reason.h
@@ -267,8 +267,7 @@
   V(kWrongArgumentCountForInvokeIntrinsic,                                     \
     "Wrong number of arguments for intrinsic")                                 \
   V(kShouldNotDirectlyEnterOsrFunction,                                        \
-    "Should not directly enter OSR-compiled function")                         \
-  V(kYield, "Yield")
+    "Should not directly enter OSR-compiled function")
 
 #define ERROR_MESSAGES_CONSTANTS(C, T) C,
 enum BailoutReason {
diff --git a/src/base/atomicops_internals_mips64_gcc.h b/src/base/atomicops_internals_mips64_gcc.h
index 85b4e46..cf2e194 100644
--- a/src/base/atomicops_internals_mips64_gcc.h
+++ b/src/base/atomicops_internals_mips64_gcc.h
@@ -91,18 +91,19 @@
                                           Atomic32 increment) {
   Atomic32 temp, temp2;
 
-  __asm__ __volatile__(".set push\n"
-                       ".set noreorder\n"
-                       "1:\n"
-                       "ll %0, %2\n"  // temp = *ptr
-                       "addu %1, %0, %3\n"  // temp2 = temp + increment
-                       "sc %1, %2\n"  // *ptr = temp2 (with atomic check)
-                       "beqz %1, 1b\n"  // start again on atomic error
-                       "addu %1, %0, %3\n"  // temp2 = temp + increment
-                       ".set pop\n"
-                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
-                       : "Ir" (increment), "m" (*ptr)
-                       : "memory");
+  __asm__ __volatile__(
+      ".set push\n"
+      ".set noreorder\n"
+      "1:\n"
+      "ll %0, %2\n"        // temp = *ptr
+      "addu %1, %0, %3\n"  // temp2 = temp + increment
+      "sc %1, %2\n"        // *ptr = temp2 (with atomic check)
+      "beqz %1, 1b\n"      // start again on atomic error
+      "addu %1, %0, %3\n"  // temp2 = temp + increment
+      ".set pop\n"
+      : "=&r"(temp), "=&r"(temp2), "=ZC"(*ptr)
+      : "Ir"(increment), "m"(*ptr)
+      : "memory");
   // temp2 now holds the final value.
   return temp2;
 }
@@ -228,18 +229,19 @@
                                           Atomic64 increment) {
   Atomic64 temp, temp2;
 
-  __asm__ __volatile__(".set push\n"
-                       ".set noreorder\n"
-                       "1:\n"
-                       "lld %0, %2\n"  // temp = *ptr
-                       "daddu %1, %0, %3\n"  // temp2 = temp + increment
-                       "scd %1, %2\n"  // *ptr = temp2 (with atomic check)
-                       "beqz %1, 1b\n"  // start again on atomic error
-                       "daddu %1, %0, %3\n"  // temp2 = temp + increment
-                       ".set pop\n"
-                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
-                       : "Ir" (increment), "m" (*ptr)
-                       : "memory");
+  __asm__ __volatile__(
+      ".set push\n"
+      ".set noreorder\n"
+      "1:\n"
+      "lld %0, %2\n"        // temp = *ptr
+      "daddu %1, %0, %3\n"  // temp2 = temp + increment
+      "scd %1, %2\n"        // *ptr = temp2 (with atomic check)
+      "beqz %1, 1b\n"       // start again on atomic error
+      "daddu %1, %0, %3\n"  // temp2 = temp + increment
+      ".set pop\n"
+      : "=&r"(temp), "=&r"(temp2), "=ZC"(*ptr)
+      : "Ir"(increment), "m"(*ptr)
+      : "memory");
   // temp2 now holds the final value.
   return temp2;
 }
diff --git a/src/base/bits.h b/src/base/bits.h
index 2e6527b..1bb3a0f 100644
--- a/src/base/bits.h
+++ b/src/base/bits.h
@@ -111,7 +111,6 @@
   return result;
 }
 
-
 // CountTrailingZeros32(value) returns the number of zero bits preceding the
 // least significant 1 bit in |value| if |value| is non-zero, otherwise it
 // returns 32.
@@ -147,6 +146,14 @@
 #endif
 }
 
+// Overloaded versions of CountTrailingZeros32/64.
+inline unsigned CountTrailingZeros(uint32_t value) {
+  return CountTrailingZeros32(value);
+}
+
+inline unsigned CountTrailingZeros(uint64_t value) {
+  return CountTrailingZeros64(value);
+}
 
 // Returns true iff |value| is a power of 2.
 inline bool IsPowerOfTwo32(uint32_t value) {
diff --git a/src/base/cpu.cc b/src/base/cpu.cc
index 12a3881..16eb7c9 100644
--- a/src/base/cpu.cc
+++ b/src/base/cpu.cc
@@ -338,7 +338,8 @@
       has_vfp_(false),
       has_vfp3_(false),
       has_vfp3_d32_(false),
-      is_fp64_mode_(false) {
+      is_fp64_mode_(false),
+      has_non_stop_time_stamp_counter_(false) {
   memcpy(vendor_, "Unknown", 8);
 #if V8_OS_NACL
 // Portable host shouldn't do feature detection.
@@ -419,6 +420,13 @@
     has_sahf_ = (cpu_info[2] & 0x00000001) != 0;
   }
 
+  // Check whether the CPU has a non-stop (invariant) time stamp counter.
+  const int parameter_containing_non_stop_time_stamp_counter = 0x80000007;
+  if (num_ext_ids >= parameter_containing_non_stop_time_stamp_counter) {
+    __cpuid(cpu_info, parameter_containing_non_stop_time_stamp_counter);
+    has_non_stop_time_stamp_counter_ = (cpu_info[3] & (1 << 8)) != 0;
+  }
+
 #elif V8_HOST_ARCH_ARM
 
 #if V8_OS_LINUX
diff --git a/src/base/cpu.h b/src/base/cpu.h
index 3778d27..19d4102 100644
--- a/src/base/cpu.h
+++ b/src/base/cpu.h
@@ -97,6 +97,9 @@
   bool has_lzcnt() const { return has_lzcnt_; }
   bool has_popcnt() const { return has_popcnt_; }
   bool is_atom() const { return is_atom_; }
+  bool has_non_stop_time_stamp_counter() const {
+    return has_non_stop_time_stamp_counter_;
+  }
 
   // arm features
   bool has_idiva() const { return has_idiva_; }
@@ -148,6 +151,7 @@
   bool has_vfp3_;
   bool has_vfp3_d32_;
   bool is_fp64_mode_;
+  bool has_non_stop_time_stamp_counter_;
 };
 
 }  // namespace base
diff --git a/src/base/file-utils.cc b/src/base/file-utils.cc
new file mode 100644
index 0000000..2262df9
--- /dev/null
+++ b/src/base/file-utils.cc
@@ -0,0 +1,36 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/file-utils.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace internal {
+
+char* RelativePath(char** buffer, const char* exec_path, const char* name) {
+  DCHECK(exec_path);
+  int path_separator = static_cast<int>(strlen(exec_path)) - 1;
+  while (path_separator >= 0 &&
+         !base::OS::isDirectorySeparator(exec_path[path_separator])) {
+    path_separator--;
+  }
+  if (path_separator >= 0) {
+    int name_length = static_cast<int>(strlen(name));
+    *buffer =
+        reinterpret_cast<char*>(calloc(path_separator + name_length + 2, 1));
+    *buffer[0] = '\0';
+    strncat(*buffer, exec_path, path_separator + 1);
+    strncat(*buffer, name, name_length);
+  } else {
+    *buffer = strdup(name);
+  }
+  return *buffer;
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/base/file-utils.h b/src/base/file-utils.h
new file mode 100644
index 0000000..ce9e9a1
--- /dev/null
+++ b/src/base/file-utils.h
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_FILE_UTILS_H_
+#define V8_FILE_UTILS_H_
+
+namespace v8 {
+namespace internal {
+
+// Helper functions to manipulate file paths.
+
+char* RelativePath(char** buffer, const char* exec_path, const char* name);
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_FILE_UTILS_H_
diff --git a/src/hashmap.h b/src/base/hashmap.h
similarity index 84%
rename from src/hashmap.h
rename to src/base/hashmap.h
index f94def7..efb5dc8 100644
--- a/src/hashmap.h
+++ b/src/base/hashmap.h
@@ -2,21 +2,31 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_HASHMAP_H_
-#define V8_HASHMAP_H_
+// The reason we write our own hash map instead of using std::unordered_map is
+// that STL containers use a mutex pool in debug builds, which can lead to a
+// deadlock when the map is used from an async signal handler.
 
-#include "src/allocation.h"
+#ifndef V8_BASE_HASHMAP_H_
+#define V8_BASE_HASHMAP_H_
+
+#include <stdlib.h>
+
 #include "src/base/bits.h"
 #include "src/base/logging.h"
-#include "src/utils.h"
 
 namespace v8 {
-namespace internal {
+namespace base {
 
-template<class AllocationPolicy>
+class DefaultAllocationPolicy {
+ public:
+  V8_INLINE void* New(size_t size) { return malloc(size); }
+  V8_INLINE static void Delete(void* p) { free(p); }
+};
+
+template <class AllocationPolicy>
 class TemplateHashMapImpl {
  public:
-  typedef bool (*MatchFun) (void* key1, void* key2);
+  typedef bool (*MatchFun)(void* key1, void* key2);
 
   // The default capacity.  This is used by the call sites which want
   // to pass in a non-default AllocationPolicy but want to use the
@@ -38,7 +48,7 @@
     void* key;
     void* value;
     uint32_t hash;  // The full hash value for key
-    int order;  // If you never remove entries this is the insertion order.
+    int order;      // If you never remove entries this is the insertion order.
   };
 
   // If an entry with matching key is found, returns that entry.
@@ -79,9 +89,7 @@
   Entry* Next(Entry* p) const;
 
   // Some match functions defined for convenience.
-  static bool PointersMatch(void* key1, void* key2) {
-    return key1 == key2;
-  }
+  static bool PointersMatch(void* key1, void* key2) { return key1 == key2; }
 
  private:
   MatchFun match_;
@@ -95,22 +103,20 @@
   void Resize(AllocationPolicy allocator);
 };
 
-typedef TemplateHashMapImpl<FreeStoreAllocationPolicy> HashMap;
+typedef TemplateHashMapImpl<DefaultAllocationPolicy> HashMap;
 
-template<class AllocationPolicy>
+template <class AllocationPolicy>
 TemplateHashMapImpl<AllocationPolicy>::TemplateHashMapImpl(
     MatchFun match, uint32_t initial_capacity, AllocationPolicy allocator) {
   match_ = match;
   Initialize(initial_capacity, allocator);
 }
 
-
-template<class AllocationPolicy>
+template <class AllocationPolicy>
 TemplateHashMapImpl<AllocationPolicy>::~TemplateHashMapImpl() {
   AllocationPolicy::Delete(map_);
 }
 
-
 template <class AllocationPolicy>
 typename TemplateHashMapImpl<AllocationPolicy>::Entry*
 TemplateHashMapImpl<AllocationPolicy>::Lookup(void* key, uint32_t hash) const {
@@ -118,7 +124,6 @@
   return p->key != NULL ? p : NULL;
 }
 
-
 template <class AllocationPolicy>
 typename TemplateHashMapImpl<AllocationPolicy>::Entry*
 TemplateHashMapImpl<AllocationPolicy>::LookupOrInsert(
@@ -145,8 +150,7 @@
   return p;
 }
 
-
-template<class AllocationPolicy>
+template <class AllocationPolicy>
 void* TemplateHashMapImpl<AllocationPolicy>::Remove(void* key, uint32_t hash) {
   // Lookup the entry for the key to remove.
   Entry* p = Probe(key, hash);
@@ -194,8 +198,7 @@
     // If the entry at position q has its initial position outside the range
     // between p and q it can be moved forward to position p and will still be
     // found. There is now a new candidate entry for clearing.
-    if ((q > p && (r <= p || r > q)) ||
-        (q < p && (r <= p && r > q))) {
+    if ((q > p && (r <= p || r > q)) || (q < p && (r <= p && r > q))) {
       *p = *q;
       p = q;
     }
@@ -207,8 +210,7 @@
   return value;
 }
 
-
-template<class AllocationPolicy>
+template <class AllocationPolicy>
 void TemplateHashMapImpl<AllocationPolicy>::Clear() {
   // Mark all entries as empty.
   const Entry* end = map_end();
@@ -218,17 +220,15 @@
   occupancy_ = 0;
 }
 
-
-template<class AllocationPolicy>
+template <class AllocationPolicy>
 typename TemplateHashMapImpl<AllocationPolicy>::Entry*
-    TemplateHashMapImpl<AllocationPolicy>::Start() const {
+TemplateHashMapImpl<AllocationPolicy>::Start() const {
   return Next(map_ - 1);
 }
 
-
-template<class AllocationPolicy>
+template <class AllocationPolicy>
 typename TemplateHashMapImpl<AllocationPolicy>::Entry*
-    TemplateHashMapImpl<AllocationPolicy>::Next(Entry* p) const {
+TemplateHashMapImpl<AllocationPolicy>::Next(Entry* p) const {
   const Entry* end = map_end();
   DCHECK(map_ - 1 <= p && p < end);
   for (p++; p < end; p++) {
@@ -239,7 +239,6 @@
   return NULL;
 }
 
-
 template <class AllocationPolicy>
 typename TemplateHashMapImpl<AllocationPolicy>::Entry*
 TemplateHashMapImpl<AllocationPolicy>::Probe(void* key, uint32_t hash) const {
@@ -261,22 +260,20 @@
   return p;
 }
 
-
-template<class AllocationPolicy>
+template <class AllocationPolicy>
 void TemplateHashMapImpl<AllocationPolicy>::Initialize(
     uint32_t capacity, AllocationPolicy allocator) {
   DCHECK(base::bits::IsPowerOfTwo32(capacity));
   map_ = reinterpret_cast<Entry*>(allocator.New(capacity * sizeof(Entry)));
   if (map_ == NULL) {
-    v8::internal::FatalProcessOutOfMemory("HashMap::Initialize");
+    FATAL("Out of memory: HashMap::Initialize");
     return;
   }
   capacity_ = capacity;
   Clear();
 }
 
-
-template<class AllocationPolicy>
+template <class AllocationPolicy>
 void TemplateHashMapImpl<AllocationPolicy>::Resize(AllocationPolicy allocator) {
   Entry* map = map_;
   uint32_t n = occupancy_;
@@ -298,12 +295,11 @@
   AllocationPolicy::Delete(map);
 }
 
-
 // A hash map for pointer keys and values with an STL-like interface.
-template<class Key, class Value, class AllocationPolicy>
-class TemplateHashMap: private TemplateHashMapImpl<AllocationPolicy> {
+template <class Key, class Value, class AllocationPolicy>
+class TemplateHashMap : private TemplateHashMapImpl<AllocationPolicy> {
  public:
-  STATIC_ASSERT(sizeof(Key*) == sizeof(void*));  // NOLINT
+  STATIC_ASSERT(sizeof(Key*) == sizeof(void*));    // NOLINT
   STATIC_ASSERT(sizeof(Value*) == sizeof(void*));  // NOLINT
   struct value_type {
     Key* first;
@@ -318,12 +314,12 @@
     }
 
     value_type* operator->() { return reinterpret_cast<value_type*>(entry_); }
-    bool operator!=(const Iterator& other) { return  entry_ != other.entry_; }
+    bool operator!=(const Iterator& other) { return entry_ != other.entry_; }
 
    private:
     Iterator(const TemplateHashMapImpl<AllocationPolicy>* map,
-             typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry) :
-        map_(map), entry_(entry) { }
+             typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry)
+        : map_(map), entry_(entry) {}
 
     const TemplateHashMapImpl<AllocationPolicy>* map_;
     typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry_;
@@ -334,10 +330,10 @@
   TemplateHashMap(
       typename TemplateHashMapImpl<AllocationPolicy>::MatchFun match,
       AllocationPolicy allocator = AllocationPolicy())
-        : TemplateHashMapImpl<AllocationPolicy>(
+      : TemplateHashMapImpl<AllocationPolicy>(
             match,
             TemplateHashMapImpl<AllocationPolicy>::kDefaultHashMapCapacity,
-            allocator) { }
+            allocator) {}
 
   Iterator begin() const { return Iterator(this, this->Start()); }
   Iterator end() const { return Iterator(this, NULL); }
@@ -350,7 +346,7 @@
   }
 };
 
-}  // namespace internal
+}  // namespace base
 }  // namespace v8
 
-#endif  // V8_HASHMAP_H_
+#endif  // V8_BASE_HASHMAP_H_
diff --git a/src/base/ieee754.cc b/src/base/ieee754.cc
new file mode 100644
index 0000000..b7178ca
--- /dev/null
+++ b/src/base/ieee754.cc
@@ -0,0 +1,2313 @@
+// The following is adapted from fdlibm (http://www.netlib.org/fdlibm).
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunSoft, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2016 the V8 project authors. All rights reserved.
+
+#include "src/base/ieee754.h"
+
+#include <cmath>
+#include <limits>
+
+#include "src/base/build_config.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+namespace ieee754 {
+
+namespace {
+
+/* Disable "potential divide by 0" warning in Visual Studio compiler. */
+
+#if V8_CC_MSVC
+
+#pragma warning(disable : 4723)
+
+#endif
+
+/*
+ * The original fdlibm code used statements like:
+ *  n0 = ((*(int*)&one)>>29)^1;   * index of high word *
+ *  ix0 = *(n0+(int*)&x);     * high word of x *
+ *  ix1 = *((1-n0)+(int*)&x);   * low word of x *
+ * to dig two 32 bit words out of the 64 bit IEEE floating point
+ * value.  That is non-ANSI, and, moreover, the gcc instruction
+ * scheduler gets it wrong.  We instead use the following macros.
+ * Unlike the original code, we determine the endianness at compile
+ * time, not at run time; I don't see much benefit to selecting
+ * endianness at run time.
+ */
+
+/*
+ * A union which permits us to convert between a double and two 32 bit
+ * ints.
+ */
+
+#if V8_TARGET_LITTLE_ENDIAN
+
+typedef union {
+  double value;
+  struct {
+    uint32_t lsw;
+    uint32_t msw;
+  } parts;
+  struct {
+    uint64_t w;
+  } xparts;
+} ieee_double_shape_type;
+
+#else
+
+typedef union {
+  double value;
+  struct {
+    uint32_t msw;
+    uint32_t lsw;
+  } parts;
+  struct {
+    uint64_t w;
+  } xparts;
+} ieee_double_shape_type;
+
+#endif
+
+/* Get two 32 bit ints from a double.  */
+
+#define EXTRACT_WORDS(ix0, ix1, d) \
+  do {                             \
+    ieee_double_shape_type ew_u;   \
+    ew_u.value = (d);              \
+    (ix0) = ew_u.parts.msw;        \
+    (ix1) = ew_u.parts.lsw;        \
+  } while (0)
+
+/* Get a 64-bit int from a double. */
+#define EXTRACT_WORD64(ix, d)    \
+  do {                           \
+    ieee_double_shape_type ew_u; \
+    ew_u.value = (d);            \
+    (ix) = ew_u.xparts.w;        \
+  } while (0)
+
+/* Get the more significant 32 bit int from a double.  */
+
+#define GET_HIGH_WORD(i, d)      \
+  do {                           \
+    ieee_double_shape_type gh_u; \
+    gh_u.value = (d);            \
+    (i) = gh_u.parts.msw;        \
+  } while (0)
+
+/* Get the less significant 32 bit int from a double.  */
+
+#define GET_LOW_WORD(i, d)       \
+  do {                           \
+    ieee_double_shape_type gl_u; \
+    gl_u.value = (d);            \
+    (i) = gl_u.parts.lsw;        \
+  } while (0)
+
+/* Set a double from two 32 bit ints.  */
+
+#define INSERT_WORDS(d, ix0, ix1) \
+  do {                            \
+    ieee_double_shape_type iw_u;  \
+    iw_u.parts.msw = (ix0);       \
+    iw_u.parts.lsw = (ix1);       \
+    (d) = iw_u.value;             \
+  } while (0)
+
+/* Set a double from a 64-bit int. */
+#define INSERT_WORD64(d, ix)     \
+  do {                           \
+    ieee_double_shape_type iw_u; \
+    iw_u.xparts.w = (ix);        \
+    (d) = iw_u.value;            \
+  } while (0)
+
+/* Set the more significant 32 bits of a double from an int.  */
+
+#define SET_HIGH_WORD(d, v)      \
+  do {                           \
+    ieee_double_shape_type sh_u; \
+    sh_u.value = (d);            \
+    sh_u.parts.msw = (v);        \
+    (d) = sh_u.value;            \
+  } while (0)
+
+/* Set the less significant 32 bits of a double from an int.  */
+
+#define SET_LOW_WORD(d, v)       \
+  do {                           \
+    ieee_double_shape_type sl_u; \
+    sl_u.value = (d);            \
+    sl_u.parts.lsw = (v);        \
+    (d) = sl_u.value;            \
+  } while (0)
+
+/* Support macro. */
+
+#define STRICT_ASSIGN(type, lval, rval) ((lval) = (rval))
+
+int32_t __ieee754_rem_pio2(double x, double *y) WARN_UNUSED_RESULT;
+double __kernel_cos(double x, double y) WARN_UNUSED_RESULT;
+int __kernel_rem_pio2(double *x, double *y, int e0, int nx, int prec,
+                      const int32_t *ipio2) WARN_UNUSED_RESULT;
+double __kernel_sin(double x, double y, int iy) WARN_UNUSED_RESULT;
+
+/* __ieee754_rem_pio2(x,y)
+ *
+ * return the remainder of x rem pi/2 in y[0]+y[1]
+ * use __kernel_rem_pio2()
+ */
+int32_t __ieee754_rem_pio2(double x, double *y) {
+  /* y[0]+y[1] receives x reduced modulo pi/2 (|y[0]+y[1]| <= pi/4); the
+   * return value is the signed multiple of pi/2 that was removed. */
+  /*
+   * Table of constants for 2/pi, 396 Hex digits (476 decimal) of 2/pi
+   */
+  static const int32_t two_over_pi[] = {
+      0xA2F983, 0x6E4E44, 0x1529FC, 0x2757D1, 0xF534DD, 0xC0DB62, 0x95993C,
+      0x439041, 0xFE5163, 0xABDEBB, 0xC561B7, 0x246E3A, 0x424DD2, 0xE00649,
+      0x2EEA09, 0xD1921C, 0xFE1DEB, 0x1CB129, 0xA73EE8, 0x8235F5, 0x2EBB44,
+      0x84E99C, 0x7026B4, 0x5F7E41, 0x3991D6, 0x398353, 0x39F49C, 0x845F8B,
+      0xBDF928, 0x3B1FF8, 0x97FFDE, 0x05980F, 0xEF2F11, 0x8B5A0A, 0x6D1F6D,
+      0x367ECF, 0x27CB09, 0xB74F46, 0x3F669E, 0x5FEA2D, 0x7527BA, 0xC7EBE5,
+      0xF17B3D, 0x0739F7, 0x8A5292, 0xEA6BFB, 0x5FB11F, 0x8D5D08, 0x560330,
+      0x46FC7B, 0x6BABF0, 0xCFBC20, 0x9AF436, 0x1DA9E3, 0x91615E, 0xE61B08,
+      0x659985, 0x5F14A0, 0x68408D, 0xFFD880, 0x4D7327, 0x310606, 0x1556CA,
+      0x73A8C9, 0x60E27B, 0xC08C6B,
+  };
+
+  /* High words of n*pi/2 for n = 1..32, used to detect cancellation. */
+  static const int32_t npio2_hw[] = {
+      0x3FF921FB, 0x400921FB, 0x4012D97C, 0x401921FB, 0x401F6A7A, 0x4022D97C,
+      0x4025FDBB, 0x402921FB, 0x402C463A, 0x402F6A7A, 0x4031475C, 0x4032D97C,
+      0x40346B9C, 0x4035FDBB, 0x40378FDB, 0x403921FB, 0x403AB41B, 0x403C463A,
+      0x403DD85A, 0x403F6A7A, 0x40407E4C, 0x4041475C, 0x4042106C, 0x4042D97C,
+      0x4043A28C, 0x40446B9C, 0x404534AC, 0x4045FDBB, 0x4046C6CB, 0x40478FDB,
+      0x404858EB, 0x404921FB,
+  };
+
+  /*
+   * invpio2:  53 bits of 2/pi
+   * pio2_1:   first  33 bit of pi/2
+   * pio2_1t:  pi/2 - pio2_1
+   * pio2_2:   second 33 bit of pi/2
+   * pio2_2t:  pi/2 - (pio2_1+pio2_2)
+   * pio2_3:   third  33 bit of pi/2
+   * pio2_3t:  pi/2 - (pio2_1+pio2_2+pio2_3)
+   */
+
+  static const double
+      zero = 0.00000000000000000000e+00,    /* 0x00000000, 0x00000000 */
+      half = 5.00000000000000000000e-01,    /* 0x3FE00000, 0x00000000 */
+      two24 = 1.67772160000000000000e+07,   /* 0x41700000, 0x00000000 */
+      invpio2 = 6.36619772367581382433e-01, /* 0x3FE45F30, 0x6DC9C883 */
+      pio2_1 = 1.57079632673412561417e+00,  /* 0x3FF921FB, 0x54400000 */
+      pio2_1t = 6.07710050650619224932e-11, /* 0x3DD0B461, 0x1A626331 */
+      pio2_2 = 6.07710050630396597660e-11,  /* 0x3DD0B461, 0x1A600000 */
+      pio2_2t = 2.02226624879595063154e-21, /* 0x3BA3198A, 0x2E037073 */
+      pio2_3 = 2.02226624871116645580e-21,  /* 0x3BA3198A, 0x2E000000 */
+      pio2_3t = 8.47842766036889956997e-32; /* 0x397B839A, 0x252049C1 */
+
+  double z, w, t, r, fn;
+  double tx[3];
+  int32_t e0, i, j, nx, n, ix, hx;
+  uint32_t low;
+
+  z = 0;
+  GET_HIGH_WORD(hx, x); /* high word of x */
+  ix = hx & 0x7fffffff; /* |x|'s high word (sign bit cleared) */
+  if (ix <= 0x3fe921fb) { /* |x| ~<= pi/4 , no need for reduction */
+    y[0] = x;
+    y[1] = 0;
+    return 0;
+  }
+  if (ix < 0x4002d97c) { /* |x| < 3pi/4, special case with n=+-1 */
+    if (hx > 0) {
+      z = x - pio2_1;
+      if (ix != 0x3ff921fb) { /* 33+53 bit pi is good enough */
+        y[0] = z - pio2_1t;
+        y[1] = (z - y[0]) - pio2_1t; /* tail of the reduction */
+      } else { /* near pi/2, use 33+33+53 bit pi */
+        z -= pio2_2;
+        y[0] = z - pio2_2t;
+        y[1] = (z - y[0]) - pio2_2t;
+      }
+      return 1;
+    } else { /* negative x */
+      z = x + pio2_1;
+      if (ix != 0x3ff921fb) { /* 33+53 bit pi is good enough */
+        y[0] = z + pio2_1t;
+        y[1] = (z - y[0]) + pio2_1t;
+      } else { /* near pi/2, use 33+33+53 bit pi */
+        z += pio2_2;
+        y[0] = z + pio2_2t;
+        y[1] = (z - y[0]) + pio2_2t;
+      }
+      return -1;
+    }
+  }
+  if (ix <= 0x413921fb) { /* |x| ~<= 2^19*(pi/2), medium size */
+    t = fabs(x);
+    n = static_cast<int32_t>(t * invpio2 + half); /* round to nearest n */
+    fn = static_cast<double>(n);
+    r = t - fn * pio2_1;
+    w = fn * pio2_1t; /* 1st round good to 85 bit */
+    if (n < 32 && ix != npio2_hw[n - 1]) {
+      y[0] = r - w; /* quick check no cancellation */
+    } else {
+      uint32_t high;
+      j = ix >> 20; /* exponent of x */
+      y[0] = r - w;
+      GET_HIGH_WORD(high, y[0]);
+      i = j - ((high >> 20) & 0x7ff); /* bits lost to cancellation */
+      if (i > 16) { /* 2nd iteration needed, good to 118 */
+        t = r;
+        w = fn * pio2_2;
+        r = t - w;
+        w = fn * pio2_2t - ((t - r) - w);
+        y[0] = r - w;
+        GET_HIGH_WORD(high, y[0]);
+        i = j - ((high >> 20) & 0x7ff);
+        if (i > 49) { /* 3rd iteration needed, 151 bits acc */
+          t = r;      /* will cover all possible cases */
+          w = fn * pio2_3;
+          r = t - w;
+          w = fn * pio2_3t - ((t - r) - w);
+          y[0] = r - w;
+        }
+      }
+    }
+    y[1] = (r - y[0]) - w; /* low part of the reduced argument */
+    if (hx < 0) {
+      y[0] = -y[0];
+      y[1] = -y[1];
+      return -n;
+    } else {
+      return n;
+    }
+  }
+  /*
+   * all other (large) arguments
+   */
+  if (ix >= 0x7ff00000) { /* x is inf or NaN */
+    y[0] = y[1] = x - x;  /* produces NaN */
+    return 0;
+  }
+  /* set z = scalbn(|x|,ilogb(x)-23) */
+  GET_LOW_WORD(low, x);
+  SET_LOW_WORD(z, low);
+  e0 = (ix >> 20) - 1046; /* e0 = ilogb(z)-23; */
+  SET_HIGH_WORD(z, ix - static_cast<int32_t>(e0 << 20));
+  /* break z into up to three 24-bit doubles for __kernel_rem_pio2 */
+  for (i = 0; i < 2; i++) {
+    tx[i] = static_cast<double>(static_cast<int32_t>(z));
+    z = (z - tx[i]) * two24;
+  }
+  tx[2] = z;
+  nx = 3;
+  while (tx[nx - 1] == zero) nx--; /* skip zero term */
+  n = __kernel_rem_pio2(tx, y, e0, nx, 2, two_over_pi);
+  if (hx < 0) {
+    y[0] = -y[0];
+    y[1] = -y[1];
+    return -n;
+  }
+  return n;
+}
+
+/* __kernel_cos( x,  y )
+ * kernel cos function on [-pi/4, pi/4], pi/4 ~ 0.785398164
+ * Input x is assumed to be bounded by ~pi/4 in magnitude.
+ * Input y is the tail of x.
+ *
+ * Algorithm
+ *      1. Since cos(-x) = cos(x), we need only to consider positive x.
+ *      2. if x < 2^-27 (hx<0x3e400000 0), return 1 with inexact if x!=0.
+ *      3. cos(x) is approximated by a polynomial of degree 14 on
+ *         [0,pi/4]
+ *                                       4            14
+ *              cos(x) ~ 1 - x*x/2 + C1*x + ... + C6*x
+ *         where the remez error is
+ *
+ *      |              2     4     6     8     10    12     14 |     -58
+ *      |cos(x)-(1-.5*x +C1*x +C2*x +C3*x +C4*x +C5*x  +C6*x  )| <= 2
+ *      |                                                      |
+ *
+ *                     4     6     8     10    12     14
+ *      4. let r = C1*x +C2*x +C3*x +C4*x +C5*x  +C6*x  , then
+ *             cos(x) = 1 - x*x/2 + r
+ *         since cos(x+y) ~ cos(x) - sin(x)*y
+ *                        ~ cos(x) - x*y,
+ *         a correction term is necessary in cos(x) and hence
+ *              cos(x+y) = 1 - (x*x/2 - (r - x*y))
+ *         For better accuracy when x > 0.3, let qx = |x|/4 with
+ *         the last 32 bits mask off, and if x > 0.78125, let qx = 0.28125.
+ *         Then
+ *              cos(x+y) = (1-qx) - ((x*x/2-qx) - (r-x*y)).
+ *         Note that 1-qx and (x*x/2-qx) is EXACT here, and the
+ *         magnitude of the latter is at least a quarter of x*x/2,
+ *         thus, reducing the rounding error in the subtraction.
+ */
+V8_INLINE double __kernel_cos(double x, double y) {
+  /* x: head of the reduced argument, |x| <~ pi/4; y: tail of x.
+   * See the algorithm description in the block comment above. */
+  static const double
+      one = 1.00000000000000000000e+00, /* 0x3FF00000, 0x00000000 */
+      C1 = 4.16666666666666019037e-02,  /* 0x3FA55555, 0x5555554C */
+      C2 = -1.38888888888741095749e-03, /* 0xBF56C16C, 0x16C15177 */
+      C3 = 2.48015872894767294178e-05,  /* 0x3EFA01A0, 0x19CB1590 */
+      C4 = -2.75573143513906633035e-07, /* 0xBE927E4F, 0x809C52AD */
+      C5 = 2.08757232129817482790e-09,  /* 0x3E21EE9E, 0xBDB4B1C4 */
+      C6 = -1.13596475577881948265e-11; /* 0xBDA8FAE9, 0xBE8838D4 */
+
+  double a, iz, z, r, qx;
+  int32_t ix;
+  GET_HIGH_WORD(ix, x);
+  ix &= 0x7fffffff;                           /* ix = |x|'s high word*/
+  if (ix < 0x3e400000) {                      /* if x < 2**27 */
+    if (static_cast<int>(x) == 0) return one; /* generate inexact */
+  }
+  z = x * x;
+  /* Polynomial r ~ cos(x) - 1 + x*x/2, evaluated by Horner's scheme. */
+  r = z * (C1 + z * (C2 + z * (C3 + z * (C4 + z * (C5 + z * C6)))));
+  if (ix < 0x3FD33333) { /* if |x| < 0.3 */
+    return one - (0.5 * z - (z * r - x * y));
+  } else {
+    if (ix > 0x3fe90000) { /* x > 0.78125 */
+      qx = 0.28125;
+    } else {
+      INSERT_WORDS(qx, ix - 0x00200000, 0); /* x/4 */
+    }
+    iz = 0.5 * z - qx; /* exact: qx has the low 32 bits masked off */
+    a = one - qx;      /* exact as well (see block comment above) */
+    return a - (iz - (z * r - x * y));
+  }
+}
+
+/* __kernel_rem_pio2(x,y,e0,nx,prec,ipio2)
+ * double x[],y[]; int e0,nx,prec; int ipio2[];
+ *
+ * __kernel_rem_pio2 return the last three digits of N with
+ *              y = x - N*pi/2
+ * so that |y| < pi/2.
+ *
+ * The method is to compute the integer (mod 8) and fraction parts of
+ * (2/pi)*x without doing the full multiplication. In general we
+ * skip the part of the product that are known to be a huge integer (
+ * more accurately, = 0 mod 8 ). Thus the number of operations are
+ * independent of the exponent of the input.
+ *
+ * (2/pi) is represented by an array of 24-bit integers in ipio2[].
+ *
+ * Input parameters:
+ *      x[]     The input value (must be positive) is broken into nx
+ *              pieces of 24-bit integers in double precision format.
+ *              x[i] will be the i-th 24 bit of x. The scaled exponent
+ *              of x[0] is given in input parameter e0 (i.e., x[0]*2^e0
+ *              match x's up to 24 bits.
+ *
+ *              Example of breaking a double positive z into x[0]+x[1]+x[2]:
+ *                      e0 = ilogb(z)-23
+ *                      z  = scalbn(z,-e0)
+ *              for i = 0,1,2
+ *                      x[i] = floor(z)
+ *                      z    = (z-x[i])*2**24
+ *
+ *
+ *      y[]     output result in an array of double precision numbers.
+ *              The dimension of y[] is:
+ *                      24-bit  precision       1
+ *                      53-bit  precision       2
+ *                      64-bit  precision       2
+ *                      113-bit precision       3
+ *              The actual value is the sum of them. Thus for 113-bit
+ *              precision, one may have to do something like:
+ *
+ *              long double t,w,r_head, r_tail;
+ *              t = (long double)y[2] + (long double)y[1];
+ *              w = (long double)y[0];
+ *              r_head = t+w;
+ *              r_tail = w - (r_head - t);
+ *
+ *      e0      The exponent of x[0]
+ *
+ *      nx      dimension of x[]
+ *
+ *      prec    an integer indicating the precision:
+ *                      0       24  bits (single)
+ *                      1       53  bits (double)
+ *                      2       64  bits (extended)
+ *                      3       113 bits (quad)
+ *
+ *      ipio2[]
+ *              integer array, contains the (24*i)-th to (24*i+23)-th
+ *              bit of 2/pi after binary point. The corresponding
+ *              floating value is
+ *
+ *                      ipio2[i] * 2^(-24(i+1)).
+ *
+ * External function:
+ *      double scalbn(), floor();
+ *
+ *
+ * Here is the description of some local variables:
+ *
+ *      jk      jk+1 is the initial number of terms of ipio2[] needed
+ *              in the computation. The recommended value is 2,3,4,
+ *              6 for single, double, extended,and quad.
+ *
+ *      jz      local integer variable indicating the number of
+ *              terms of ipio2[] used.
+ *
+ *      jx      nx - 1
+ *
+ *      jv      index for pointing to the suitable ipio2[] for the
+ *              computation. In general, we want
+ *                      ( 2^e0*x[0] * ipio2[jv-1]*2^(-24jv) )/8
+ *              is an integer. Thus
+ *                      e0-3-24*jv >= 0 or (e0-3)/24 >= jv
+ *              Hence jv = max(0,(e0-3)/24).
+ *
+ *      jp      jp+1 is the number of terms in PIo2[] needed, jp = jk.
+ *
+ *      q[]     double array with integral value, representing the
+ *              24-bits chunk of the product of x and 2/pi.
+ *
+ *      q0      the corresponding exponent of q[0]. Note that the
+ *              exponent for q[i] would be q0-24*i.
+ *
+ *      PIo2[]  double precision array, obtained by cutting pi/2
+ *              into 24 bits chunks.
+ *
+ *      f[]     ipio2[] in floating point
+ *
+ *      iq[]    integer array by breaking up q[] in 24-bits chunk.
+ *
+ *      fq[]    final product of x*(2/pi) in fq[0],..,fq[jk]
+ *
+ *      ih      integer. If >0 it indicates q[] is >= 0.5, hence
+ *              it also indicates the *sign* of the result.
+ *
+ */
+int __kernel_rem_pio2(double *x, double *y, int e0, int nx, int prec,
+                      const int32_t *ipio2) {
+  /* Multiprecision payne-hanek style reduction; see the extensive block
+   * comment above for the meaning of every local variable. */
+  /* Constants:
+   * The hexadecimal values are the intended ones for the following
+   * constants. The decimal values may be used, provided that the
+   * compiler will convert from decimal to binary accurately enough
+   * to produce the hexadecimal values shown.
+   */
+  static const int init_jk[] = {2, 3, 4, 6}; /* initial value for jk */
+
+  /* pi/2 cut into 24-bit chunks. */
+  static const double PIo2[] = {
+      1.57079625129699707031e+00, /* 0x3FF921FB, 0x40000000 */
+      7.54978941586159635335e-08, /* 0x3E74442D, 0x00000000 */
+      5.39030252995776476554e-15, /* 0x3CF84698, 0x80000000 */
+      3.28200341580791294123e-22, /* 0x3B78CC51, 0x60000000 */
+      1.27065575308067607349e-29, /* 0x39F01B83, 0x80000000 */
+      1.22933308981111328932e-36, /* 0x387A2520, 0x40000000 */
+      2.73370053816464559624e-44, /* 0x36E38222, 0x80000000 */
+      2.16741683877804819444e-51, /* 0x3569F31D, 0x00000000 */
+  };
+
+  static const double
+      zero = 0.0,
+      one = 1.0,
+      two24 = 1.67772160000000000000e+07,  /* 0x41700000, 0x00000000 */
+      twon24 = 5.96046447753906250000e-08; /* 0x3E700000, 0x00000000 */
+
+  int32_t jz, jx, jv, jp, jk, carry, n, iq[20], i, j, k, m, q0, ih;
+  double z, fw, f[20], fq[20], q[20];
+
+  /* initialize jk*/
+  jk = init_jk[prec];
+  jp = jk;
+
+  /* determine jx,jv,q0, note that 3>q0 */
+  jx = nx - 1;
+  jv = (e0 - 3) / 24;
+  if (jv < 0) jv = 0;
+  q0 = e0 - 24 * (jv + 1);
+
+  /* set up f[0] to f[jx+jk] where f[jx+jk] = ipio2[jv+jk] */
+  j = jv - jx;
+  m = jx + jk;
+  for (i = 0; i <= m; i++, j++) {
+    f[i] = (j < 0) ? zero : static_cast<double>(ipio2[j]);
+  }
+
+  /* compute q[0],q[1],...q[jk] */
+  for (i = 0; i <= jk; i++) {
+    for (j = 0, fw = 0.0; j <= jx; j++) fw += x[j] * f[jx + i - j];
+    q[i] = fw;
+  }
+
+  jz = jk; /* jz = number of terms of ipio2[] used (see block comment) */
+recompute:
+  /* distill q[] into iq[] reversingly */
+  for (i = 0, j = jz, z = q[jz]; j > 0; i++, j--) {
+    fw = static_cast<double>(static_cast<int32_t>(twon24 * z));
+    iq[i] = static_cast<int32_t>(z - two24 * fw);
+    z = q[j - 1] + fw;
+  }
+
+  /* compute n */
+  z = scalbn(z, q0);           /* actual value of z */
+  z -= 8.0 * floor(z * 0.125); /* trim off integer >= 8 */
+  n = static_cast<int32_t>(z);
+  z -= static_cast<double>(n);
+  ih = 0;
+  if (q0 > 0) { /* need iq[jz-1] to determine n */
+    i = (iq[jz - 1] >> (24 - q0));
+    n += i;
+    iq[jz - 1] -= i << (24 - q0);
+    ih = iq[jz - 1] >> (23 - q0);
+  } else if (q0 == 0) {
+    ih = iq[jz - 1] >> 23;
+  } else if (z >= 0.5) {
+    ih = 2;
+  }
+
+  if (ih > 0) { /* q > 0.5 */
+    n += 1;
+    carry = 0;
+    for (i = 0; i < jz; i++) { /* compute 1-q */
+      j = iq[i];
+      if (carry == 0) {
+        if (j != 0) {
+          carry = 1;
+          iq[i] = 0x1000000 - j;
+        }
+      } else {
+        iq[i] = 0xffffff - j;
+      }
+    }
+    if (q0 > 0) { /* rare case: chance is 1 in 12 */
+      switch (q0) {
+        case 1:
+          iq[jz - 1] &= 0x7fffff;
+          break;
+        case 2:
+          iq[jz - 1] &= 0x3fffff;
+          break;
+      }
+    }
+    if (ih == 2) {
+      z = one - z;
+      if (carry != 0) z -= scalbn(one, q0);
+    }
+  }
+
+  /* check if recomputation is needed */
+  if (z == zero) {
+    j = 0;
+    for (i = jz - 1; i >= jk; i--) j |= iq[i];
+    if (j == 0) { /* need recomputation */
+      for (k = 1; jk >= k && iq[jk - k] == 0; k++) {
+        /* k = no. of terms needed */
+      }
+
+      for (i = jz + 1; i <= jz + k; i++) { /* add q[jz+1] to q[jz+k] */
+        f[jx + i] = ipio2[jv + i];
+        for (j = 0, fw = 0.0; j <= jx; j++) fw += x[j] * f[jx + i - j];
+        q[i] = fw;
+      }
+      jz += k;
+      goto recompute;
+    }
+  }
+
+  /* chop off zero terms */
+  if (z == 0.0) {
+    jz -= 1;
+    q0 -= 24;
+    while (iq[jz] == 0) {
+      jz--;
+      q0 -= 24;
+    }
+  } else { /* break z into 24-bit if necessary */
+    z = scalbn(z, -q0);
+    if (z >= two24) {
+      fw = static_cast<double>(static_cast<int32_t>(twon24 * z));
+      iq[jz] = z - two24 * fw;
+      jz += 1;
+      q0 += 24;
+      iq[jz] = fw;
+    } else {
+      iq[jz] = z;
+    }
+  }
+
+  /* convert integer "bit" chunk to floating-point value */
+  fw = scalbn(one, q0);
+  for (i = jz; i >= 0; i--) {
+    q[i] = fw * iq[i];
+    fw *= twon24;
+  }
+
+  /* compute PIo2[0,...,jp]*q[jz,...,0] */
+  for (i = jz; i >= 0; i--) {
+    for (fw = 0.0, k = 0; k <= jp && k <= jz - i; k++) fw += PIo2[k] * q[i + k];
+    fq[jz - i] = fw;
+  }
+
+  /* compress fq[] into y[]; ih != 0 means the sign must be flipped */
+  switch (prec) {
+    case 0:
+      fw = 0.0;
+      for (i = jz; i >= 0; i--) fw += fq[i];
+      y[0] = (ih == 0) ? fw : -fw;
+      break;
+    case 1:
+    case 2:
+      fw = 0.0;
+      for (i = jz; i >= 0; i--) fw += fq[i];
+      y[0] = (ih == 0) ? fw : -fw;
+      fw = fq[0] - fw; /* residual after the first sum becomes y[1] */
+      for (i = 1; i <= jz; i++) fw += fq[i];
+      y[1] = (ih == 0) ? fw : -fw;
+      break;
+    case 3: /* painful */
+      for (i = jz; i > 0; i--) {
+        fw = fq[i - 1] + fq[i];
+        fq[i] += fq[i - 1] - fw;
+        fq[i - 1] = fw;
+      }
+      for (i = jz; i > 1; i--) {
+        fw = fq[i - 1] + fq[i];
+        fq[i] += fq[i - 1] - fw;
+        fq[i - 1] = fw;
+      }
+      for (fw = 0.0, i = jz; i >= 2; i--) fw += fq[i];
+      if (ih == 0) {
+        y[0] = fq[0];
+        y[1] = fq[1];
+        y[2] = fw;
+      } else {
+        y[0] = -fq[0];
+        y[1] = -fq[1];
+        y[2] = -fw;
+      }
+  }
+  return n & 7; /* last three bits of N, per the block comment */
+}
+
+/* __kernel_sin( x, y, iy)
+ * kernel sin function on [-pi/4, pi/4], pi/4 ~ 0.7854
+ * Input x is assumed to be bounded by ~pi/4 in magnitude.
+ * Input y is the tail of x.
+ * Input iy indicates whether y is 0. (if iy=0, y assume to be 0).
+ *
+ * Algorithm
+ *      1. Since sin(-x) = -sin(x), we need only to consider positive x.
+ *      2. if x < 2^-27 (hx<0x3e400000 0), return x with inexact if x!=0.
+ *      3. sin(x) is approximated by a polynomial of degree 13 on
+ *         [0,pi/4]
+ *                               3            13
+ *              sin(x) ~ x + S1*x + ... + S6*x
+ *         where
+ *
+ *      |sin(x)         2     4     6     8     10     12  |     -58
+ *      |----- - (1+S1*x +S2*x +S3*x +S4*x +S5*x  +S6*x   )| <= 2
+ *      |  x                                               |
+ *
+ *      4. sin(x+y) = sin(x) + sin'(x')*y
+ *                  ~ sin(x) + (1-x*x/2)*y
+ *         For better accuracy, let
+ *                   3      2      2      2      2
+ *              r = x *(S2+x *(S3+x *(S4+x *(S5+x *S6))))
+ *         then                   3    2
+ *              sin(x) = x + (S1*x + (x *(r-y/2)+y))
+ */
+V8_INLINE double __kernel_sin(double x, double y, int iy) {
+  /* x: head of the reduced argument, |x| <~ pi/4; y: tail of x;
+   * iy == 0 indicates that the tail y is exactly zero. */
+  static const double
+      half = 5.00000000000000000000e-01, /* 0x3FE00000, 0x00000000 */
+      S1 = -1.66666666666666324348e-01,  /* 0xBFC55555, 0x55555549 */
+      S2 = 8.33333333332248946124e-03,   /* 0x3F811111, 0x1110F8A6 */
+      S3 = -1.98412698298579493134e-04,  /* 0xBF2A01A0, 0x19C161D5 */
+      S4 = 2.75573137070700676789e-06,   /* 0x3EC71DE3, 0x57B1FE7D */
+      S5 = -2.50507602534068634195e-08,  /* 0xBE5AE5E6, 0x8A2B9CEB */
+      S6 = 1.58969099521155010221e-10;   /* 0x3DE5D93A, 0x5ACFD57C */
+
+  double z, r, v;
+  int32_t ix;
+  GET_HIGH_WORD(ix, x);
+  ix &= 0x7fffffff;      /* high word of x */
+  if (ix < 0x3e400000) { /* |x| < 2**-27 */
+    if (static_cast<int>(x) == 0) return x;
+  } /* generate inexact */
+  z = x * x;
+  v = z * x; /* x^3 */
+  /* r ~ sin(x)/x - 1 - S1*x^2, evaluated by Horner's scheme. */
+  r = S2 + z * (S3 + z * (S4 + z * (S5 + z * S6)));
+  if (iy == 0) {
+    return x + v * (S1 + z * r);
+  } else {
+    /* Include the correction term from the tail y (see block comment). */
+    return x - ((z * (half * y - v * r) - y) - v * S1);
+  }
+}
+
+/* __kernel_tan( x, y, k )
+ * kernel tan function on [-pi/4, pi/4], pi/4 ~ 0.7854
+ * Input x is assumed to be bounded by ~pi/4 in magnitude.
+ * Input y is the tail of x.
+ * Input k indicates whether tan (if k=1) or
+ * -1/tan (if k= -1) is returned.
+ *
+ * Algorithm
+ *      1. Since tan(-x) = -tan(x), we need only to consider positive x.
+ *      2. if x < 2^-28 (hx<0x3e300000 0), return x with inexact if x!=0.
+ *      3. tan(x) is approximated by an odd polynomial of degree 27 on
+ *         [0,0.67434]
+ *                               3             27
+ *              tan(x) ~ x + T1*x + ... + T13*x
+ *         where
+ *
+ *              |tan(x)         2     4            26   |     -59.2
+ *              |----- - (1+T1*x +T2*x +.... +T13*x    )| <= 2
+ *              |  x                                    |
+ *
+ *         Note: tan(x+y) = tan(x) + tan'(x)*y
+ *                        ~ tan(x) + (1+x*x)*y
+ *         Therefore, for better accuracy in computing tan(x+y), let
+ *                   3      2      2       2       2
+ *              r = x *(T2+x *(T3+x *(...+x *(T12+x *T13))))
+ *         then
+ *                                  3    2
+ *              tan(x+y) = x + (T1*x + (x *(r+y)+y))
+ *
+ *      4. For x in [0.67434,pi/4],  let y = pi/4 - x, then
+ *              tan(x) = tan(pi/4-y) = (1-tan(y))/(1+tan(y))
+ *                     = 1 - 2*(tan(y) - (tan(y)^2)/(1+tan(y)))
+ */
+double __kernel_tan(double x, double y, int iy) {
+  /* Note: the parameter called "k" in the block comment above is named
+   * iy here: iy == 1 returns tan(x+y), iy == -1 returns -1/tan(x+y). */
+  static const double xxx[] = {
+      3.33333333333334091986e-01,             /* 3FD55555, 55555563 */
+      1.33333333333201242699e-01,             /* 3FC11111, 1110FE7A */
+      5.39682539762260521377e-02,             /* 3FABA1BA, 1BB341FE */
+      2.18694882948595424599e-02,             /* 3F9664F4, 8406D637 */
+      8.86323982359930005737e-03,             /* 3F8226E3, E96E8493 */
+      3.59207910759131235356e-03,             /* 3F6D6D22, C9560328 */
+      1.45620945432529025516e-03,             /* 3F57DBC8, FEE08315 */
+      5.88041240820264096874e-04,             /* 3F4344D8, F2F26501 */
+      2.46463134818469906812e-04,             /* 3F3026F7, 1A8D1068 */
+      7.81794442939557092300e-05,             /* 3F147E88, A03792A6 */
+      7.14072491382608190305e-05,             /* 3F12B80F, 32F0A7E9 */
+      -1.85586374855275456654e-05,            /* BEF375CB, DB605373 */
+      2.59073051863633712884e-05,             /* 3EFB2A70, 74BF7AD4 */
+      /* one */ 1.00000000000000000000e+00,   /* 3FF00000, 00000000 */
+      /* pio4 */ 7.85398163397448278999e-01,  /* 3FE921FB, 54442D18 */
+      /* pio4lo */ 3.06161699786838301793e-17 /* 3C81A626, 33145C07 */
+  };
+/* Aliases into xxx[]: polynomial coefficients plus three constants. */
+#define one xxx[13]
+#define pio4 xxx[14]
+#define pio4lo xxx[15]
+#define T xxx
+
+  double z, r, v, w, s;
+  int32_t ix, hx;
+
+  GET_HIGH_WORD(hx, x);             /* high word of x */
+  ix = hx & 0x7fffffff;             /* high word of |x| */
+  if (ix < 0x3e300000) {            /* x < 2**-28 */
+    if (static_cast<int>(x) == 0) { /* generate inexact */
+      uint32_t low;
+      GET_LOW_WORD(low, x);
+      if (((ix | low) | (iy + 1)) == 0) {
+        /* x == +-0 and iy == -1: -1/tan(+-0) is +-inf */
+        return one / fabs(x);
+      } else {
+        if (iy == 1) {
+          return x;
+        } else { /* compute -1 / (x+y) carefully */
+          double a, t;
+
+          z = w = x + y;
+          SET_LOW_WORD(z, 0);
+          v = y - (z - x);
+          t = a = -one / w;
+          SET_LOW_WORD(t, 0);
+          s = one + t * z;
+          return t + a * (s + t * v);
+        }
+      }
+    }
+  }
+  if (ix >= 0x3FE59428) { /* |x| >= 0.6744 */
+    /* Fold into [0, 0.6744] via tan(x) = tan(pi/4 - (pi/4 - x)). */
+    if (hx < 0) {
+      x = -x;
+      y = -y;
+    }
+    z = pio4 - x;
+    w = pio4lo - y;
+    x = z + w;
+    y = 0.0;
+  }
+  z = x * x;
+  w = z * z;
+  /*
+   * Break x^5*(T[1]+x^2*T[2]+...) into
+   * x^5(T[1]+x^4*T[3]+...+x^20*T[11]) +
+   * x^5(x^2*(T[2]+x^4*T[4]+...+x^22*[T12]))
+   */
+  r = T[1] + w * (T[3] + w * (T[5] + w * (T[7] + w * (T[9] + w * T[11]))));
+  v = z *
+      (T[2] + w * (T[4] + w * (T[6] + w * (T[8] + w * (T[10] + w * T[12])))));
+  s = z * x;
+  r = y + z * (s * (r + v) + y);
+  r += T[0] * s;
+  w = x + r;
+  if (ix >= 0x3FE59428) {
+    /* Undo the pi/4 folding; (1 - ((hx >> 30) & 2)) is +-1 from sign(x). */
+    v = iy;
+    return (1 - ((hx >> 30) & 2)) * (v - 2.0 * (x - (w * w / (w + v) - r)));
+  }
+  if (iy == 1) {
+    return w;
+  } else {
+    /*
+     * if allow error up to 2 ulp, simply return
+     * -1.0 / (x+r) here
+     */
+    /* compute -1.0 / (x+r) accurately */
+    double a, t;
+    z = w;
+    SET_LOW_WORD(z, 0);
+    v = r - (z - x);  /* z+v = r+x */
+    t = a = -1.0 / w; /* a = -1.0/w */
+    SET_LOW_WORD(t, 0);
+    s = 1.0 + t * z;
+    return t + a * (s + t * v);
+  }
+
+#undef one
+#undef pio4
+#undef pio4lo
+#undef T
+}
+
+}  // namespace
+
+/* atan(x)
+ * Method
+ *   1. Reduce x to positive by atan(x) = -atan(-x).
+ *   2. According to the integer k=4t+0.25 chopped, t=x, the argument
+ *      is further reduced to one of the following intervals and the
+ *      arctangent of t is evaluated by the corresponding formula:
+ *
+ *      [0,7/16]      atan(x) = t-t^3*(a1+t^2*(a2+...(a10+t^2*a11)...)
+ *      [7/16,11/16]  atan(x) = atan(1/2) + atan( (t-0.5)/(1+t/2) )
+ *      [11/16.19/16] atan(x) = atan( 1 ) + atan( (t-1)/(1+t) )
+ *      [19/16,39/16] atan(x) = atan(3/2) + atan( (t-1.5)/(1+1.5t) )
+ *      [39/16,INF]   atan(x) = atan(INF) + atan( -1/t )
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+double atan(double x) {
+  /* High and low parts of atan at the interval breakpoints 0.5, 1, 1.5, inf. */
+  static const double atanhi[] = {
+      4.63647609000806093515e-01, /* atan(0.5)hi 0x3FDDAC67, 0x0561BB4F */
+      7.85398163397448278999e-01, /* atan(1.0)hi 0x3FE921FB, 0x54442D18 */
+      9.82793723247329054082e-01, /* atan(1.5)hi 0x3FEF730B, 0xD281F69B */
+      1.57079632679489655800e+00, /* atan(inf)hi 0x3FF921FB, 0x54442D18 */
+  };
+
+  static const double atanlo[] = {
+      2.26987774529616870924e-17, /* atan(0.5)lo 0x3C7A2B7F, 0x222F65E2 */
+      3.06161699786838301793e-17, /* atan(1.0)lo 0x3C81A626, 0x33145C07 */
+      1.39033110312309984516e-17, /* atan(1.5)lo 0x3C700788, 0x7AF0CBBD */
+      6.12323399573676603587e-17, /* atan(inf)lo 0x3C91A626, 0x33145C07 */
+  };
+
+  static const double aT[] = {
+      3.33333333333329318027e-01,  /* 0x3FD55555, 0x5555550D */
+      -1.99999999998764832476e-01, /* 0xBFC99999, 0x9998EBC4 */
+      1.42857142725034663711e-01,  /* 0x3FC24924, 0x920083FF */
+      -1.11111104054623557880e-01, /* 0xBFBC71C6, 0xFE231671 */
+      9.09088713343650656196e-02,  /* 0x3FB745CD, 0xC54C206E */
+      -7.69187620504482999495e-02, /* 0xBFB3B0F2, 0xAF749A6D */
+      6.66107313738753120669e-02,  /* 0x3FB10D66, 0xA0D03D51 */
+      -5.83357013379057348645e-02, /* 0xBFADDE2D, 0x52DEFD9A */
+      4.97687799461593236017e-02,  /* 0x3FA97B4B, 0x24760DEB */
+      -3.65315727442169155270e-02, /* 0xBFA2B444, 0x2C6A6C2F */
+      1.62858201153657823623e-02,  /* 0x3F90AD3A, 0xE322DA11 */
+  };
+
+  static const double one = 1.0, huge = 1.0e300;
+
+  double w, s1, s2, z;
+  int32_t ix, hx, id; /* id indexes atanhi/atanlo; -1 means no reduction */
+
+  GET_HIGH_WORD(hx, x);
+  ix = hx & 0x7fffffff;
+  if (ix >= 0x44100000) { /* if |x| >= 2^66 */
+    uint32_t low;
+    GET_LOW_WORD(low, x);
+    if (ix > 0x7ff00000 || (ix == 0x7ff00000 && (low != 0)))
+      return x + x; /* NaN */
+    /* NOTE(review): the volatile access presumably prevents the compiler
+     * from folding the hi+lo sum -- confirm against upstream fdlibm. */
+    if (hx > 0)
+      return atanhi[3] + *(volatile double *)&atanlo[3];
+    else
+      return -atanhi[3] - *(volatile double *)&atanlo[3];
+  }
+  if (ix < 0x3fdc0000) {            /* |x| < 0.4375 */
+    if (ix < 0x3e400000) {          /* |x| < 2^-27 */
+      if (huge + x > one) return x; /* raise inexact */
+    }
+    id = -1;
+  } else {
+    x = fabs(x);
+    if (ix < 0x3ff30000) {   /* |x| < 1.1875 */
+      if (ix < 0x3fe60000) { /* 7/16 <=|x|<11/16 */
+        id = 0;
+        x = (2.0 * x - one) / (2.0 + x);
+      } else { /* 11/16<=|x|< 19/16 */
+        id = 1;
+        x = (x - one) / (x + one);
+      }
+    } else {
+      if (ix < 0x40038000) { /* |x| < 2.4375 */
+        id = 2;
+        x = (x - 1.5) / (one + 1.5 * x);
+      } else { /* 2.4375 <= |x| < 2^66 */
+        id = 3;
+        x = -1.0 / x;
+      }
+    }
+  }
+  /* end of argument reduction */
+  z = x * x;
+  w = z * z;
+  /* break sum from i=0 to 10 aT[i]z**(i+1) into odd and even poly */
+  s1 = z * (aT[0] +
+            w * (aT[2] + w * (aT[4] + w * (aT[6] + w * (aT[8] + w * aT[10])))));
+  s2 = w * (aT[1] + w * (aT[3] + w * (aT[5] + w * (aT[7] + w * aT[9]))));
+  if (id < 0) {
+    return x - x * (s1 + s2);
+  } else {
+    z = atanhi[id] - ((x * (s1 + s2) - atanlo[id]) - x);
+    return (hx < 0) ? -z : z; /* restore the sign of the input */
+  }
+}
+
+/* atan2(y,x)
+ * Method :
+ *  1. Reduce y to positive by atan2(y,x)=-atan2(-y,x).
+ *  2. Reduce x to positive by (if x and y are unexceptional):
+ *    ARG (x+iy) = arctan(y/x)       ... if x > 0,
+ *    ARG (x+iy) = pi - arctan[y/(-x)]   ... if x < 0,
+ *
+ * Special cases:
+ *
+ *  ATAN2((anything), NaN ) is NaN;
+ *  ATAN2(NAN , (anything) ) is NaN;
+ *  ATAN2(+-0, +(anything but NaN)) is +-0  ;
+ *  ATAN2(+-0, -(anything but NaN)) is +-pi ;
+ *  ATAN2(+-(anything but 0 and NaN), 0) is +-pi/2;
+ *  ATAN2(+-(anything but INF and NaN), +INF) is +-0 ;
+ *  ATAN2(+-(anything but INF and NaN), -INF) is +-pi;
+ *  ATAN2(+-INF,+INF ) is +-pi/4 ;
+ *  ATAN2(+-INF,-INF ) is +-3pi/4;
+ *  ATAN2(+-INF, (anything but,0,NaN, and INF)) is +-pi/2;
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+double atan2(double y, double x) {
+  static volatile double tiny = 1.0e-300;
+  static const double
+      zero = 0.0,
+      pi_o_4 = 7.8539816339744827900E-01, /* 0x3FE921FB, 0x54442D18 */
+      pi_o_2 = 1.5707963267948965580E+00, /* 0x3FF921FB, 0x54442D18 */
+      pi = 3.1415926535897931160E+00;     /* 0x400921FB, 0x54442D18 */
+  static volatile double pi_lo =
+      1.2246467991473531772E-16; /* 0x3CA1A626, 0x33145C07 */
+
+  double z;
+  int32_t k, m, hx, hy, ix, iy;
+  uint32_t lx, ly;
+
+  EXTRACT_WORDS(hx, lx, x);
+  ix = hx & 0x7fffffff;
+  EXTRACT_WORDS(hy, ly, y);
+  iy = hy & 0x7fffffff;
+  /* NaN detection on the raw bit patterns of x and y. */
+  if (((ix | ((lx | -static_cast<int32_t>(lx)) >> 31)) > 0x7ff00000) ||
+      ((iy | ((ly | -static_cast<int32_t>(ly)) >> 31)) > 0x7ff00000)) {
+    return x + y; /* x or y is NaN */
+  }
+  if (((hx - 0x3ff00000) | lx) == 0) return atan(y); /* x=1.0 */
+  m = ((hy >> 31) & 1) | ((hx >> 30) & 2);           /* 2*sign(x)+sign(y) */
+
+  /* when y = 0 */
+  if ((iy | ly) == 0) {
+    switch (m) {
+      case 0:
+      case 1:
+        return y; /* atan(+-0,+anything)=+-0 */
+      case 2:
+        return pi + tiny; /* atan(+0,-anything) = pi */
+      case 3:
+        return -pi - tiny; /* atan(-0,-anything) =-pi */
+    }
+  }
+  /* when x = 0 */
+  if ((ix | lx) == 0) return (hy < 0) ? -pi_o_2 - tiny : pi_o_2 + tiny;
+
+  /* when x is INF */
+  if (ix == 0x7ff00000) {
+    if (iy == 0x7ff00000) {
+      switch (m) {
+        case 0:
+          return pi_o_4 + tiny; /* atan(+INF,+INF) */
+        case 1:
+          return -pi_o_4 - tiny; /* atan(-INF,+INF) */
+        case 2:
+          return 3.0 * pi_o_4 + tiny; /*atan(+INF,-INF)*/
+        case 3:
+          return -3.0 * pi_o_4 - tiny; /*atan(-INF,-INF)*/
+      }
+    } else {
+      switch (m) {
+        case 0:
+          return zero; /* atan(+...,+INF) */
+        case 1:
+          return -zero; /* atan(-...,+INF) */
+        case 2:
+          return pi + tiny; /* atan(+...,-INF) */
+        case 3:
+          return -pi - tiny; /* atan(-...,-INF) */
+      }
+    }
+  }
+  /* when y is INF */
+  if (iy == 0x7ff00000) return (hy < 0) ? -pi_o_2 - tiny : pi_o_2 + tiny;
+
+  /* compute y/x */
+  k = (iy - ix) >> 20; /* difference of the biased exponents of y and x */
+  if (k > 60) { /* |y/x| >  2**60 */
+    z = pi_o_2 + 0.5 * pi_lo;
+    m &= 1;
+  } else if (hx < 0 && k < -60) {
+    z = 0.0; /* 0 > |y|/x > -2**-60 */
+  } else {
+    z = atan(fabs(y / x)); /* safe to do y/x */
+  }
+  /* Fix up the quadrant using m = 2*sign(x)+sign(y). */
+  switch (m) {
+    case 0:
+      return z; /* atan(+,+) */
+    case 1:
+      return -z; /* atan(-,+) */
+    case 2:
+      return pi - (z - pi_lo); /* atan(+,-) */
+    default:                   /* case 3 */
+      return (z - pi_lo) - pi; /* atan(-,-) */
+  }
+}
+
+/* cos(x)
+ * Return cosine function of x.
+ *
+ * kernel function:
+ *      __kernel_sin            ... sine function on [-pi/4,pi/4]
+ *      __kernel_cos            ... cosine function on [-pi/4,pi/4]
+ *      __ieee754_rem_pio2      ... argument reduction routine
+ *
+ * Method.
+ *      Let S,C and T denote the sin, cos and tan respectively on
+ *      [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
+ *      in [-pi/4 , +pi/4], and let n = k mod 4.
+ *      We have
+ *
+ *          n        sin(x)      cos(x)        tan(x)
+ *     ----------------------------------------------------------
+ *          0          S           C             T
+ *          1          C          -S            -1/T
+ *          2         -S          -C             T
+ *          3         -C           S            -1/T
+ *     ----------------------------------------------------------
+ *
+ * Special cases:
+ *      Let trig be any of sin, cos, or tan.
+ *      trig(+-INF)  is NaN, with signals;
+ *      trig(NaN)    is that NaN;
+ *
+ * Accuracy:
+ *      TRIG(x) returns trig(x) nearly rounded
+ */
+double cos(double x) {
+  double y[2], z = 0.0;
+  int32_t n, ix;
+
+  /* High word of x. */
+  GET_HIGH_WORD(ix, x);
+
+  /* |x| ~< pi/4 */
+  ix &= 0x7fffffff;
+  if (ix <= 0x3fe921fb) {
+    return __kernel_cos(x, z);
+  } else if (ix >= 0x7ff00000) {
+    /* cos(Inf or NaN) is NaN */
+    return x - x;
+  } else {
+    /* argument reduction needed */
+    n = __ieee754_rem_pio2(x, y);
+    /* Dispatch per the quadrant table in the block comment above. */
+    switch (n & 3) {
+      case 0:
+        return __kernel_cos(y[0], y[1]); /*  C */
+      case 1:
+        return -__kernel_sin(y[0], y[1], 1); /* -S */
+      case 2:
+        return -__kernel_cos(y[0], y[1]); /* -C */
+      default:
+        return __kernel_sin(y[0], y[1], 1); /*  S */
+    }
+  }
+}
+
+/* exp(x)
+ * Returns the exponential of x.
+ *
+ * Method
+ *   1. Argument reduction:
+ *      Reduce x to an r so that |r| <= 0.5*ln2 ~ 0.34658.
+ *      Given x, find r and integer k such that
+ *
+ *               x = k*ln2 + r,  |r| <= 0.5*ln2.
+ *
+ *      Here r will be represented as r = hi-lo for better
+ *      accuracy.
+ *
+ *   2. Approximation of exp(r) by a special rational function on
+ *      the interval [0,0.34658]:
+ *      Write
+ *          R(r**2) = r*(exp(r)+1)/(exp(r)-1) = 2 + r*r/6 - r**4/360 + ...
+ *      We use a special Remes algorithm on [0,0.34658] to generate
+ *      a polynomial of degree 5 to approximate R. The maximum error
+ *      of this polynomial approximation is bounded by 2**-59. In
+ *      other words,
+ *          R(z) ~ 2.0 + P1*z + P2*z**2 + P3*z**3 + P4*z**4 + P5*z**5
+ *      (where z=r*r, and the values of P1 to P5 are listed below)
+ *      and
+ *          |                  5          |     -59
+ *          | 2.0+P1*z+...+P5*z   -  R(z) | <= 2
+ *          |                             |
+ *      The computation of exp(r) thus becomes
+ *                             2*r
+ *              exp(r) = 1 + -------
+ *                            R - r
+ *                                 r*R1(r)
+ *                     = 1 + r + ----------- (for better accuracy)
+ *                                2 - R1(r)
+ *      where
+ *                               2       4             10
+ *              R1(r) = r - (P1*r  + P2*r  + ... + P5*r   ).
+ *
+ *   3. Scale back to obtain exp(x):
+ *      From step 1, we have
+ *         exp(x) = 2^k * exp(r)
+ *
+ * Special cases:
+ *      exp(INF) is INF, exp(NaN) is NaN;
+ *      exp(-INF) is 0, and
+ *      for finite argument, only exp(0)=1 is exact.
+ *
+ * Accuracy:
+ *      according to an error analysis, the error is always less than
+ *      1 ulp (unit in the last place).
+ *
+ * Misc. info.
+ *      For IEEE double
+ *          if x >  7.09782712893383973096e+02 then exp(x) overflow
+ *          if x < -7.45133219101941108420e+02 then exp(x) underflow
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+double exp(double x) {
+  static const double
+      one = 1.0,
+      halF[2] = {0.5, -0.5},
+      o_threshold = 7.09782712893383973096e+02,  /* 0x40862E42, 0xFEFA39EF */
+      u_threshold = -7.45133219101941108420e+02, /* 0xc0874910, 0xD52D3051 */
+      ln2HI[2] = {6.93147180369123816490e-01,    /* 0x3fe62e42, 0xfee00000 */
+                  -6.93147180369123816490e-01},  /* 0xbfe62e42, 0xfee00000 */
+      ln2LO[2] = {1.90821492927058770002e-10,    /* 0x3dea39ef, 0x35793c76 */
+                  -1.90821492927058770002e-10},  /* 0xbdea39ef, 0x35793c76 */
+      invln2 = 1.44269504088896338700e+00,       /* 0x3ff71547, 0x652b82fe */
+      P1 = 1.66666666666666019037e-01,           /* 0x3FC55555, 0x5555553E */
+      P2 = -2.77777777770155933842e-03,          /* 0xBF66C16C, 0x16BEBD93 */
+      P3 = 6.61375632143793436117e-05,           /* 0x3F11566A, 0xAF25DE2C */
+      P4 = -1.65339022054652515390e-06,          /* 0xBEBBBD41, 0xC5D26BF1 */
+      P5 = 4.13813679705723846039e-08,           /* 0x3E663769, 0x72BEA4D0 */
+      E = 2.718281828459045;                     /* 0x4005bf0a, 0x8b145769 */
+
+  /* volatile keeps the compiler from constant-folding the
+     overflow/underflow products below at compile time. */
+  static volatile double
+      huge = 1.0e+300,
+      twom1000 = 9.33263618503218878990e-302, /* 2**-1000=0x01700000,0*/
+      two1023 = 8.988465674311579539e307;     /* 0x1p1023 */
+
+  double y, hi = 0.0, lo = 0.0, c, t, twopk;
+  int32_t k = 0, xsb;
+  uint32_t hx;
+
+  GET_HIGH_WORD(hx, x);
+  xsb = (hx >> 31) & 1; /* sign bit of x */
+  hx &= 0x7fffffff;     /* high word of |x| */
+
+  /* filter out non-finite argument */
+  if (hx >= 0x40862E42) { /* if |x|>=709.78... */
+    if (hx >= 0x7ff00000) {
+      uint32_t lx;
+      GET_LOW_WORD(lx, x);
+      if (((hx & 0xfffff) | lx) != 0)
+        return x + x; /* NaN */
+      else
+        return (xsb == 0) ? x : 0.0; /* exp(+-inf)={inf,0} */
+    }
+    if (x > o_threshold) return huge * huge;         /* overflow */
+    if (x < u_threshold) return twom1000 * twom1000; /* underflow */
+  }
+
+  /* argument reduction */
+  if (hx > 0x3fd62e42) {   /* if  |x| > 0.5 ln2 */
+    if (hx < 0x3FF0A2B2) { /* and |x| < 1.5 ln2 */
+      /* TODO(rtoy): We special case exp(1) here to return the correct
+       * value of E, as the computation below would get the last bit
+       * wrong. We should probably fix the algorithm instead.
+       */
+      if (x == 1.0) return E;
+      hi = x - ln2HI[xsb];
+      lo = ln2LO[xsb];
+      /* k = +1 for positive x (xsb == 0), -1 for negative x (xsb == 1). */
+      k = 1 - xsb - xsb;
+    } else {
+      /* k = x/ln2 rounded to nearest; halF[xsb] supplies the +-0.5 bias
+         before the truncating cast. */
+      k = static_cast<int>(invln2 * x + halF[xsb]);
+      t = k;
+      hi = x - t * ln2HI[0]; /* t*ln2HI is exact here */
+      lo = t * ln2LO[0];
+    }
+    STRICT_ASSIGN(double, x, hi - lo);
+  } else if (hx < 0x3e300000) {         /* when |x|<2**-28 */
+    if (huge + x > one) return one + x; /* trigger inexact */
+  } else {
+    k = 0;
+  }
+
+  /* x is now in primary range */
+  t = x * x;
+  /* Build twopk = 2^k (or 2^(k+1000) for the deep-underflow case) by
+     writing the biased exponent field directly. */
+  if (k >= -1021) {
+    INSERT_WORDS(twopk, 0x3ff00000 + (k << 20), 0);
+  } else {
+    INSERT_WORDS(twopk, 0x3ff00000 + ((k + 1000) << 20), 0);
+  }
+  c = x - t * (P1 + t * (P2 + t * (P3 + t * (P4 + t * P5))));
+  if (k == 0) {
+    return one - ((x * c) / (c - 2.0) - x);
+  } else {
+    y = one - ((lo - (x * c) / (2.0 - c)) - hi);
+  }
+  if (k >= -1021) {
+    /* For k == 1024 the exponent field written into twopk above would be
+       that of +Inf, so scale in two steps via two1023 instead. */
+    if (k == 1024) return y * 2.0 * two1023;
+    return y * twopk;
+  } else {
+    /* k < -1021: twopk carries an extra factor of 2^1000; remove it. */
+    return y * twopk * twom1000;
+  }
+}
+
+/*
+ * Method :
+ *    1.Reduced x to positive by atanh(-x) = -atanh(x)
+ *    2.For x>=0.5
+ *              1              2x                          x
+ *  atanh(x) = --- * log(1 + -------) = 0.5 * log1p(2 * --------)
+ *              2             1 - x                      1 - x
+ *
+ *   For x<0.5
+ *  atanh(x) = 0.5*log1p(2x+2x*x/(1-x))
+ *
+ * Special cases:
+ *  atanh(x) is NaN if |x| > 1 with signal;
+ *  atanh(NaN) is that NaN with no signal;
+ *  atanh(+-1) is +-INF with signal.
+ *
+ */
+double atanh(double x) {
+  static const double one = 1.0, huge = 1e300;
+  static const double zero = 0.0;
+
+  double t;
+  int32_t hx, ix;
+  uint32_t lx;
+  EXTRACT_WORDS(hx, lx, x);
+  ix = hx & 0x7fffffff;
+  /* (lx | -lx) >> 31 is 1 iff the low mantissa word is nonzero, so the
+     combined test below is an exact |x| > 1 check on both words. */
+  if ((ix | ((lx | -static_cast<int32_t>(lx)) >> 31)) > 0x3ff00000) /* |x|>1 */
+    return (x - x) / (x - x);
+  /* |x| == 1 exactly here (|x| > 1 was rejected above): +-Inf via x/0. */
+  if (ix == 0x3ff00000) return x / zero;
+  if (ix < 0x3e300000 && (huge + x) > zero) return x; /* x<2**-28 */
+  SET_HIGH_WORD(x, ix); /* x = |x| */
+  if (ix < 0x3fe00000) { /* x < 0.5 */
+    t = x + x;
+    t = 0.5 * log1p(t + t * x / (one - x));
+  } else {
+    t = 0.5 * log1p((x + x) / (one - x));
+  }
+  /* atanh is odd: restore the sign of the original argument. */
+  if (hx >= 0)
+    return t;
+  else
+    return -t;
+}
+
+/* log(x)
+ * Return the logarithm of x
+ *
+ * Method :
+ *   1. Argument Reduction: find k and f such that
+ *     x = 2^k * (1+f),
+ *     where  sqrt(2)/2 < 1+f < sqrt(2) .
+ *
+ *   2. Approximation of log(1+f).
+ *  Let s = f/(2+f) ; based on log(1+f) = log(1+s) - log(1-s)
+ *     = 2s + 2/3 s**3 + 2/5 s**5 + .....,
+ *         = 2s + s*R
+ *      We use a special Remes algorithm on [0,0.1716] to generate
+ *  a polynomial of degree 14 to approximate R The maximum error
+ *  of this polynomial approximation is bounded by 2**-58.45. In
+ *  other words,
+ *            2      4      6      8      10      12      14
+ *      R(z) ~ Lg1*s +Lg2*s +Lg3*s +Lg4*s +Lg5*s  +Lg6*s  +Lg7*s
+ *    (the values of Lg1 to Lg7 are listed in the program)
+ *  and
+ *      |      2          14          |     -58.45
+ *      | Lg1*s +...+Lg7*s    -  R(z) | <= 2
+ *      |                             |
+ *  Note that 2s = f - s*f = f - hfsq + s*hfsq, where hfsq = f*f/2.
+ *  In order to guarantee error in log below 1ulp, we compute log
+ *  by
+ *    log(1+f) = f - s*(f - R)  (if f is not too large)
+ *    log(1+f) = f - (hfsq - s*(hfsq+R)). (better accuracy)
+ *
+ *  3. Finally,  log(x) = k*ln2 + log(1+f).
+ *          = k*ln2_hi+(f-(hfsq-(s*(hfsq+R)+k*ln2_lo)))
+ *     Here ln2 is split into two floating point number:
+ *      ln2_hi + ln2_lo,
+ *     where n*ln2_hi is always exact for |n| < 2000.
+ *
+ * Special cases:
+ *  log(x) is NaN with signal if x < 0 (including -INF) ;
+ *  log(+INF) is +INF; log(0) is -INF with signal;
+ *  log(NaN) is that NaN with no signal.
+ *
+ * Accuracy:
+ *  according to an error analysis, the error is always less than
+ *  1 ulp (unit in the last place).
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+double log(double x) {
+  static const double                      /* -- */
+      ln2_hi = 6.93147180369123816490e-01, /* 3fe62e42 fee00000 */
+      ln2_lo = 1.90821492927058770002e-10, /* 3dea39ef 35793c76 */
+      two54 = 1.80143985094819840000e+16,  /* 43500000 00000000 */
+      Lg1 = 6.666666666666735130e-01,      /* 3FE55555 55555593 */
+      Lg2 = 3.999999999940941908e-01,      /* 3FD99999 9997FA04 */
+      Lg3 = 2.857142874366239149e-01,      /* 3FD24924 94229359 */
+      Lg4 = 2.222219843214978396e-01,      /* 3FCC71C5 1D8E78AF */
+      Lg5 = 1.818357216161805012e-01,      /* 3FC74664 96CB03DE */
+      Lg6 = 1.531383769920937332e-01,      /* 3FC39A09 D078C69F */
+      Lg7 = 1.479819860511658591e-01;      /* 3FC2F112 DF3E5244 */
+
+  static const double zero = 0.0;
+  /* volatile so the division producing -Inf below is performed at run
+     time rather than constant-folded. */
+  static volatile double vzero = 0.0;
+
+  double hfsq, f, s, z, R, w, t1, t2, dk;
+  int32_t k, hx, i, j;
+  uint32_t lx;
+
+  EXTRACT_WORDS(hx, lx, x);
+
+  k = 0;
+  if (hx < 0x00100000) { /* x < 2**-1022  */
+    if (((hx & 0x7fffffff) | lx) == 0)
+      return -two54 / vzero;           /* log(+-0)=-inf */
+    if (hx < 0) return (x - x) / zero; /* log(-#) = NaN */
+    k -= 54;
+    x *= two54; /* subnormal number, scale up x */
+    GET_HIGH_WORD(hx, x);
+  }
+  if (hx >= 0x7ff00000) return x + x;
+  k += (hx >> 20) - 1023;
+  hx &= 0x000fffff;
+  /* i is 0x100000 when the mantissa is at or above that of sqrt(2); in
+     that case x/2 is normalized instead of x and k is bumped below. */
+  i = (hx + 0x95f64) & 0x100000;
+  SET_HIGH_WORD(x, hx | (i ^ 0x3ff00000)); /* normalize x or x/2 */
+  k += (i >> 20);
+  f = x - 1.0;
+  if ((0x000fffff & (2 + hx)) < 3) { /* -2**-20 <= f < 2**-20 */
+    if (f == zero) {
+      if (k == 0) {
+        return zero;
+      } else {
+        dk = static_cast<double>(k);
+        return dk * ln2_hi + dk * ln2_lo;
+      }
+    }
+    /* Tiny f: a short polynomial suffices. */
+    R = f * f * (0.5 - 0.33333333333333333 * f);
+    if (k == 0) {
+      return f - R;
+    } else {
+      dk = static_cast<double>(k);
+      return dk * ln2_hi - ((R - dk * ln2_lo) - f);
+    }
+  }
+  s = f / (2.0 + f);
+  dk = static_cast<double>(k);
+  z = s * s;
+  i = hx - 0x6147a;
+  w = z * z;
+  j = 0x6b851 - hx;
+  /* The degree-14 polynomial in s is split into two interleaved Horner
+     chains (even/odd coefficients). */
+  t1 = w * (Lg2 + w * (Lg4 + w * Lg6));
+  t2 = z * (Lg1 + w * (Lg3 + w * (Lg5 + w * Lg7)));
+  i |= j;
+  R = t2 + t1;
+  /* i > 0 iff 0x6147a <= hx <= 0x6b851; in that range the hfsq form is
+     used (the "better accuracy" variant from the header comment). */
+  if (i > 0) {
+    hfsq = 0.5 * f * f;
+    if (k == 0)
+      return f - (hfsq - s * (hfsq + R));
+    else
+      return dk * ln2_hi - ((hfsq - (s * (hfsq + R) + dk * ln2_lo)) - f);
+  } else {
+    if (k == 0)
+      return f - s * (f - R);
+    else
+      return dk * ln2_hi - ((s * (f - R) - dk * ln2_lo) - f);
+  }
+}
+
+/* double log1p(double x)
+ *
+ * Method :
+ *   1. Argument Reduction: find k and f such that
+ *      1+x = 2^k * (1+f),
+ *     where  sqrt(2)/2 < 1+f < sqrt(2) .
+ *
+ *      Note. If k=0, then f=x is exact. However, if k!=0, then f
+ *  may not be representable exactly. In that case, a correction
+ *  term is needed. Let u=1+x rounded. Let c = (1+x)-u, then
+ *  log(1+x) - log(u) ~ c/u. Thus, we proceed to compute log(u),
+ *  and add back the correction term c/u.
+ *  (Note: when x > 2**53, one can simply return log(x))
+ *
+ *   2. Approximation of log1p(f).
+ *  Let s = f/(2+f) ; based on log(1+f) = log(1+s) - log(1-s)
+ *     = 2s + 2/3 s**3 + 2/5 s**5 + .....,
+ *         = 2s + s*R
+ *      We use a special Remes algorithm on [0,0.1716] to generate
+ *  a polynomial of degree 14 to approximate R The maximum error
+ *  of this polynomial approximation is bounded by 2**-58.45. In
+ *  other words,
+ *            2      4      6      8      10      12      14
+ *      R(z) ~ Lp1*s +Lp2*s +Lp3*s +Lp4*s +Lp5*s  +Lp6*s  +Lp7*s
+ *    (the values of Lp1 to Lp7 are listed in the program)
+ *  and
+ *      |      2          14          |     -58.45
+ *      | Lp1*s +...+Lp7*s    -  R(z) | <= 2
+ *      |                             |
+ *  Note that 2s = f - s*f = f - hfsq + s*hfsq, where hfsq = f*f/2.
+ *  In order to guarantee error in log below 1ulp, we compute log
+ *  by
+ *    log1p(f) = f - (hfsq - s*(hfsq+R)).
+ *
+ *  3. Finally, log1p(x) = k*ln2 + log1p(f).
+ *           = k*ln2_hi+(f-(hfsq-(s*(hfsq+R)+k*ln2_lo)))
+ *     Here ln2 is split into two floating point number:
+ *      ln2_hi + ln2_lo,
+ *     where n*ln2_hi is always exact for |n| < 2000.
+ *
+ * Special cases:
+ *  log1p(x) is NaN with signal if x < -1 (including -INF) ;
+ *  log1p(+INF) is +INF; log1p(-1) is -INF with signal;
+ *  log1p(NaN) is that NaN with no signal.
+ *
+ * Accuracy:
+ *  according to an error analysis, the error is always less than
+ *  1 ulp (unit in the last place).
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ *
+ * Note: Assuming log() return accurate answer, the following
+ *   algorithm can be used to compute log1p(x) to within a few ULP:
+ *
+ *    u = 1+x;
+ *    if(u==1.0) return x ; else
+ *         return log(u)*(x/(u-1.0));
+ *
+ *   See HP-15C Advanced Functions Handbook, p.193.
+ */
+double log1p(double x) {
+  static const double                      /* -- */
+      ln2_hi = 6.93147180369123816490e-01, /* 3fe62e42 fee00000 */
+      ln2_lo = 1.90821492927058770002e-10, /* 3dea39ef 35793c76 */
+      two54 = 1.80143985094819840000e+16,  /* 43500000 00000000 */
+      Lp1 = 6.666666666666735130e-01,      /* 3FE55555 55555593 */
+      Lp2 = 3.999999999940941908e-01,      /* 3FD99999 9997FA04 */
+      Lp3 = 2.857142874366239149e-01,      /* 3FD24924 94229359 */
+      Lp4 = 2.222219843214978396e-01,      /* 3FCC71C5 1D8E78AF */
+      Lp5 = 1.818357216161805012e-01,      /* 3FC74664 96CB03DE */
+      Lp6 = 1.531383769920937332e-01,      /* 3FC39A09 D078C69F */
+      Lp7 = 1.479819860511658591e-01;      /* 3FC2F112 DF3E5244 */
+
+  static const double zero = 0.0;
+  /* volatile so the division producing +Inf below is performed at run
+     time rather than constant-folded. */
+  static volatile double vzero = 0.0;
+
+  double hfsq, f, c, s, z, R, u;
+  int32_t k, hx, hu, ax;
+
+  GET_HIGH_WORD(hx, x);
+  ax = hx & 0x7fffffff;
+
+  /* k != 0 selects the path below that computes a correction term c for
+     the rounding of u = 1+x; it is cleared when f = x can be used as-is. */
+  k = 1;
+  if (hx < 0x3FDA827A) {    /* 1+x < sqrt(2)+ */
+    if (ax >= 0x3ff00000) { /* x <= -1.0 */
+      if (x == -1.0)
+        return -two54 / vzero; /* log1p(-1)=+inf */
+      else
+        return (x - x) / (x - x); /* log1p(x<-1)=NaN */
+    }
+    if (ax < 0x3e200000) {    /* |x| < 2**-29 */
+      if (two54 + x > zero    /* raise inexact */
+          && ax < 0x3c900000) /* |x| < 2**-54 */
+        return x;
+      else
+        return x - x * x * 0.5;
+    }
+    if (hx > 0 || hx <= static_cast<int32_t>(0xbfd2bec4)) {
+      k = 0;
+      f = x;
+      hu = 1; /* nonzero sentinel: skips the hu == 0 (|f| < 2**-20) path */
+    } /* sqrt(2)/2- <= 1+x < sqrt(2)+ */
+  }
+  if (hx >= 0x7ff00000) return x + x;
+  if (k != 0) {
+    if (hx < 0x43400000) {
+      STRICT_ASSIGN(double, u, 1.0 + x);
+      GET_HIGH_WORD(hu, u);
+      k = (hu >> 20) - 1023;
+      c = (k > 0) ? 1.0 - (u - x) : x - (u - 1.0); /* correction term */
+      c /= u;
+    } else {
+      /* x >= 2**53: per the header comment, log(x) suffices there, so no
+         correction term is needed. */
+      u = x;
+      GET_HIGH_WORD(hu, u);
+      k = (hu >> 20) - 1023;
+      c = 0;
+    }
+    hu &= 0x000fffff; /* mantissa bits of u */
+    /*
+     * The approximation to sqrt(2) used in thresholds is not
+     * critical.  However, the ones used above must give less
+     * strict bounds than the one here so that the k==0 case is
+     * never reached from here, since here we have committed to
+     * using the correction term but don't use it if k==0.
+     */
+    if (hu < 0x6a09e) {                  /* u ~< sqrt(2) */
+      SET_HIGH_WORD(u, hu | 0x3ff00000); /* normalize u */
+    } else {
+      k += 1;
+      SET_HIGH_WORD(u, hu | 0x3fe00000); /* normalize u/2 */
+      hu = (0x00100000 - hu) >> 2;
+    }
+    f = u - 1.0;
+  }
+  hfsq = 0.5 * f * f;
+  if (hu == 0) { /* |f| < 2**-20 */
+    if (f == zero) {
+      if (k == 0) {
+        return zero;
+      } else {
+        c += k * ln2_lo;
+        return k * ln2_hi + c;
+      }
+    }
+    /* Tiny f: a short polynomial suffices. */
+    R = hfsq * (1.0 - 0.66666666666666666 * f);
+    if (k == 0)
+      return f - R;
+    else
+      return k * ln2_hi - ((R - (k * ln2_lo + c)) - f);
+  }
+  s = f / (2.0 + f);
+  z = s * s;
+  R = z * (Lp1 +
+           z * (Lp2 + z * (Lp3 + z * (Lp4 + z * (Lp5 + z * (Lp6 + z * Lp7))))));
+  if (k == 0)
+    return f - (hfsq - s * (hfsq + R));
+  else
+    return k * ln2_hi - ((hfsq - (s * (hfsq + R) + (k * ln2_lo + c))) - f);
+}
+
+/*
+ * k_log1p(f):
+ * Return log(1+f) - f for 1+f in ~[sqrt(2)/2, sqrt(2)].
+ *
+ * The following describes the overall strategy for computing
+ * logarithms in base e.  The argument reduction and adding the final
+ * term of the polynomial are done by the caller for increased accuracy
+ * when different bases are used.
+ *
+ * Method :
+ *   1. Argument Reduction: find k and f such that
+ *         x = 2^k * (1+f),
+ *         where  sqrt(2)/2 < 1+f < sqrt(2) .
+ *
+ *   2. Approximation of log(1+f).
+ *      Let s = f/(2+f) ; based on log(1+f) = log(1+s) - log(1-s)
+ *            = 2s + 2/3 s**3 + 2/5 s**5 + .....,
+ *            = 2s + s*R
+ *      We use a special Remes algorithm on [0,0.1716] to generate
+ *      a polynomial of degree 14 to approximate R The maximum error
+ *      of this polynomial approximation is bounded by 2**-58.45. In
+ *      other words,
+ *          2      4      6      8      10      12      14
+ *          R(z) ~ Lg1*s +Lg2*s +Lg3*s +Lg4*s +Lg5*s  +Lg6*s  +Lg7*s
+ *      (the values of Lg1 to Lg7 are listed in the program)
+ *      and
+ *          |      2          14          |     -58.45
+ *          | Lg1*s +...+Lg7*s    -  R(z) | <= 2
+ *          |                             |
+ *      Note that 2s = f - s*f = f - hfsq + s*hfsq, where hfsq = f*f/2.
+ *      In order to guarantee error in log below 1ulp, we compute log
+ *      by
+ *          log(1+f) = f - s*(f - R)            (if f is not too large)
+ *          log(1+f) = f - (hfsq - s*(hfsq+R)). (better accuracy)
+ *
+ *   3. Finally,  log(x) = k*ln2 + log(1+f).
+ *          = k*ln2_hi+(f-(hfsq-(s*(hfsq+R)+k*ln2_lo)))
+ *      Here ln2 is split into two floating point number:
+ *          ln2_hi + ln2_lo,
+ *      where n*ln2_hi is always exact for |n| < 2000.
+ *
+ * Special cases:
+ *      log(x) is NaN with signal if x < 0 (including -INF) ;
+ *      log(+INF) is +INF; log(0) is -INF with signal;
+ *      log(NaN) is that NaN with no signal.
+ *
+ * Accuracy:
+ *      according to an error analysis, the error is always less than
+ *      1 ulp (unit in the last place).
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+/* Coefficients of the degree-14 polynomial approximation of R(z) used
+   by k_log1p() below; see the preceding comment block for the
+   derivation and the 2**-58.45 error bound. */
+static const double Lg1 = 6.666666666666735130e-01, /* 3FE55555 55555593 */
+    Lg2 = 3.999999999940941908e-01,                 /* 3FD99999 9997FA04 */
+    Lg3 = 2.857142874366239149e-01,                 /* 3FD24924 94229359 */
+    Lg4 = 2.222219843214978396e-01,                 /* 3FCC71C5 1D8E78AF */
+    Lg5 = 1.818357216161805012e-01,                 /* 3FC74664 96CB03DE */
+    Lg6 = 1.531383769920937332e-01,                 /* 3FC39A09 D078C69F */
+    Lg7 = 1.479819860511658591e-01;                 /* 3FC2F112 DF3E5244 */
+
+/*
+ * We always inline k_log1p(), since doing so produces a
+ * substantial performance improvement (~40% on amd64).
+ */
+static inline double k_log1p(double f) {
+  double hfsq, s, z, R, w, t1, t2;
+
+  /* s = f/(2+f); log(1+f) expands in odd powers of s (see above). */
+  s = f / (2.0 + f);
+  z = s * s;
+  w = z * z;
+  /* Degree-14 polynomial in s, split into two interleaved Horner chains
+     over even/odd coefficients. */
+  t1 = w * (Lg2 + w * (Lg4 + w * Lg6));
+  t2 = z * (Lg1 + w * (Lg3 + w * (Lg5 + w * Lg7)));
+  R = t2 + t1;
+  hfsq = 0.5 * f * f;
+  /* NOTE(review): the value returned is s*(hfsq+R); per the formulas in
+     the header comment this equals log(1+f) - f + hfsq, and callers such
+     as log2() subtract hfsq themselves (hi = f - hfsq), so the "return
+     log(1+f) - f" description above is approximate. */
+  return s * (hfsq + R);
+}
+
+/*
+ * Return the base 2 logarithm of x.  See e_log.c and k_log.h for most
+ * comments.
+ *
+ * This reduces x to {k, 1+f} exactly as in e_log.c, then calls the kernel,
+ * then does the combining and scaling steps
+ *    log2(x) = (f - 0.5*f*f + k_log1p(f)) / ln2 + k
+ * in not-quite-routine extra precision.
+ */
+double log2(double x) {
+  static const double
+      two54 = 1.80143985094819840000e+16,   /* 0x43500000, 0x00000000 */
+      ivln2hi = 1.44269504072144627571e+00, /* 0x3ff71547, 0x65200000 */
+      ivln2lo = 1.67517131648865118353e-10; /* 0x3de705fc, 0x2eefa200 */
+
+  static const double zero = 0.0;
+  /* volatile so the division producing -Inf below is performed at run
+     time rather than constant-folded. */
+  static volatile double vzero = 0.0;
+
+  double f, hfsq, hi, lo, r, val_hi, val_lo, w, y;
+  int32_t i, k, hx;
+  uint32_t lx;
+
+  EXTRACT_WORDS(hx, lx, x);
+
+  k = 0;
+  if (hx < 0x00100000) { /* x < 2**-1022  */
+    if (((hx & 0x7fffffff) | lx) == 0)
+      return -two54 / vzero;           /* log(+-0)=-inf */
+    if (hx < 0) return (x - x) / zero; /* log(-#) = NaN */
+    k -= 54;
+    x *= two54; /* subnormal number, scale up x */
+    GET_HIGH_WORD(hx, x);
+  }
+  if (hx >= 0x7ff00000) return x + x;
+  if (hx == 0x3ff00000 && lx == 0) return zero; /* log(1) = +0 */
+  k += (hx >> 20) - 1023;
+  hx &= 0x000fffff;
+  /* As in log(): i is 0x100000 when the mantissa is at or above that of
+     sqrt(2), selecting normalization of x/2 and bumping k below. */
+  i = (hx + 0x95f64) & 0x100000;
+  SET_HIGH_WORD(x, hx | (i ^ 0x3ff00000)); /* normalize x or x/2 */
+  k += (i >> 20);
+  y = static_cast<double>(k);
+  f = x - 1.0;
+  hfsq = 0.5 * f * f;
+  r = k_log1p(f);
+
+  /*
+   * f-hfsq must (for args near 1) be evaluated in extra precision
+   * to avoid a large cancellation when x is near sqrt(2) or 1/sqrt(2).
+   * This is fairly efficient since f-hfsq only depends on f, so can
+   * be evaluated in parallel with R.  Not combining hfsq with R also
+   * keeps R small (though not as small as a true `lo' term would be),
+   * so that extra precision is not needed for terms involving R.
+   *
+   * Compiler bugs involving extra precision used to break Dekker's
+   * theorem for splitting f-hfsq as hi+lo, unless double_t was used
+   * or the multi-precision calculations were avoided when double_t
+   * has extra precision.  These problems are now automatically
+   * avoided as a side effect of the optimization of combining the
+   * Dekker splitting step with the clear-low-bits step.
+   *
+   * y must (for args near sqrt(2) and 1/sqrt(2)) be added in extra
+   * precision to avoid a very large cancellation when x is very near
+   * these values.  Unlike the above cancellations, this problem is
+   * specific to base 2.  It is strange that adding +-1 is so much
+   * harder than adding +-ln2 or +-log10_2.
+   *
+   * This uses Dekker's theorem to normalize y+val_hi, so the
+   * compiler bugs are back in some configurations, sigh.  And I
+   * don't want to use double_t to avoid them, since that gives a
+   * pessimization and the support for avoiding the pessimization
+   * is not yet available.
+   *
+   * The multi-precision calculations for the multiplications are
+   * routine.
+   */
+  hi = f - hfsq;
+  SET_LOW_WORD(hi, 0); /* clear low bits so hi * ivln2hi is exact */
+  lo = (f - hi) - hfsq + r;
+  val_hi = hi * ivln2hi;
+  val_lo = (lo + hi) * ivln2lo + lo * ivln2hi;
+
+  /* spadd(val_hi, val_lo, y), except for not using double_t: */
+  w = y + val_hi;
+  val_lo += (y - w) + val_hi;
+  val_hi = w;
+
+  return val_lo + val_hi;
+}
+
+/*
+ * Return the base 10 logarithm of x
+ *
+ * Method :
+ *      Let log10_2hi = leading 40 bits of log10(2) and
+ *          log10_2lo = log10(2) - log10_2hi,
+ *          ivln10   = 1/log(10) rounded.
+ *      Then
+ *              n = ilogb(x),
+ *              if(n<0)  n = n+1;
+ *              x = scalbn(x,-n);
+ *              log10(x) := n*log10_2hi + (n*log10_2lo + ivln10*log(x))
+ *
+ *  Note 1:
+ *     To guarantee log10(10**n)=n, where 10**n is normal, the rounding
+ *     mode must be set to Round-to-Nearest.
+ *  Note 2:
+ *      [1/log(10)] rounded to 53 bits has error .198 ulps;
+ *      log10 is monotonic at all binary break points.
+ *
+ *  Special cases:
+ *      log10(x) is NaN if x < 0;
+ *      log10(+INF) is +INF; log10(0) is -INF;
+ *      log10(NaN) is that NaN;
+ *      log10(10**N) = N  for N=0,1,...,22.
+ */
+double log10(double x) {
+  static const double
+      two54 = 1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */
+      ivln10 = 4.34294481903251816668e-01,
+      log10_2hi = 3.01029995663611771306e-01, /* 0x3FD34413, 0x509F6000 */
+      log10_2lo = 3.69423907715893078616e-13; /* 0x3D59FEF3, 0x11F12B36 */
+
+  static const double zero = 0.0;
+  /* volatile so the division producing -Inf below is performed at run
+     time rather than constant-folded. */
+  static volatile double vzero = 0.0;
+
+  double y;
+  int32_t i, k, hx;
+  uint32_t lx;
+
+  EXTRACT_WORDS(hx, lx, x);
+
+  k = 0;
+  if (hx < 0x00100000) { /* x < 2**-1022  */
+    if (((hx & 0x7fffffff) | lx) == 0)
+      return -two54 / vzero;           /* log(+-0)=-inf */
+    if (hx < 0) return (x - x) / zero; /* log(-#) = NaN */
+    k -= 54;
+    x *= two54; /* subnormal number, scale up x */
+    GET_HIGH_WORD(hx, x);
+    GET_LOW_WORD(lx, x);
+  }
+  if (hx >= 0x7ff00000) return x + x;
+  if (hx == 0x3ff00000 && lx == 0) return zero; /* log(1) = +0 */
+  k += (hx >> 20) - 1023;
+
+  i = (k & 0x80000000) >> 31; /* i = 1 if k < 0, else 0 */
+  /* Rebuild x with unbiased exponent 0 (k >= 0) or -1 (k < 0); this is
+     the header's x = scalbn(x, -n) with n = k + i, and y = n below. */
+  hx = (hx & 0x000fffff) | ((0x3ff - i) << 20);
+  y = k + i;
+  SET_HIGH_WORD(x, hx);
+  SET_LOW_WORD(x, lx);
+
+  /* log10(x) = n*log10_2hi + (n*log10_2lo + ivln10*log(x)); the low
+     parts are summed first, as in the header's formula. */
+  double z = y * log10_2lo + ivln10 * log(x);
+  return z + y * log10_2hi;
+}
+
+/* expm1(x)
+ * Returns exp(x)-1, the exponential of x minus 1.
+ *
+ * Method
+ *   1. Argument reduction:
+ *  Given x, find r and integer k such that
+ *
+ *               x = k*ln2 + r,  |r| <= 0.5*ln2 ~ 0.34658
+ *
+ *      Here a correction term c will be computed to compensate
+ *  the error in r when rounded to a floating-point number.
+ *
+ *   2. Approximating expm1(r) by a special rational function on
+ *  the interval [0,0.34658]:
+ *  Since
+ *      r*(exp(r)+1)/(exp(r)-1) = 2+ r^2/6 - r^4/360 + ...
+ *  we define R1(r*r) by
+ *      r*(exp(r)+1)/(exp(r)-1) = 2+ r^2/6 * R1(r*r)
+ *  That is,
+ *      R1(r**2) = 6/r *((exp(r)+1)/(exp(r)-1) - 2/r)
+ *         = 6/r * ( 1 + 2.0*(1/(exp(r)-1) - 1/r))
+ *         = 1 - r^2/60 + r^4/2520 - r^6/100800 + ...
+ *      We use a special Remes algorithm on [0,0.347] to generate
+ *   a polynomial of degree 5 in r*r to approximate R1. The
+ *  maximum error of this polynomial approximation is bounded
+ *  by 2**-61. In other words,
+ *      R1(z) ~ 1.0 + Q1*z + Q2*z**2 + Q3*z**3 + Q4*z**4 + Q5*z**5
+ *  where   Q1  =  -1.6666666666666567384E-2,
+ *     Q2  =   3.9682539681370365873E-4,
+ *     Q3  =  -9.9206344733435987357E-6,
+ *     Q4  =   2.5051361420808517002E-7,
+ *     Q5  =  -6.2843505682382617102E-9;
+ *    z   =  r*r,
+ *  with error bounded by
+ *      |                  5           |     -61
+ *      | 1.0+Q1*z+...+Q5*z   -  R1(z) | <= 2
+ *      |                              |
+ *
+ *  expm1(r) = exp(r)-1 is then computed by the following
+ *   specific way which minimizes the accumulation rounding error:
+ *             2     3
+ *            r     r    [ 3 - (R1 + R1*r/2)  ]
+ *        expm1(r) = r + --- + --- * [--------------------]
+ *                  2     2    [ 6 - r*(3 - R1*r/2) ]
+ *
+ *  To compensate the error in the argument reduction, we use
+ *    expm1(r+c) = expm1(r) + c + expm1(r)*c
+ *         ~ expm1(r) + c + r*c
+ *  Thus c+r*c will be added in as the correction terms for
+ *  expm1(r+c). Now rearrange the term to avoid optimization
+ *   screw up:
+ *            (      2                                    2 )
+ *            ({  ( r    [ R1 -  (3 - R1*r/2) ]  )  }    r  )
+ *   expm1(r+c)~r - ({r*(--- * [--------------------]-c)-c} - --- )
+ *                  ({  ( 2    [ 6 - r*(3 - R1*r/2) ]  )  }    2  )
+ *                      (                                             )
+ *
+ *       = r - E
+ *   3. Scale back to obtain expm1(x):
+ *  From step 1, we have
+ *     expm1(x) = either 2^k*[expm1(r)+1] - 1
+ *        = or     2^k*[expm1(r) + (1-2^-k)]
+ *   4. Implementation notes:
+ *  (A). To save one multiplication, we scale the coefficient Qi
+ *       to Qi*2^i, and replace z by (x^2)/2.
+ *  (B). To achieve maximum accuracy, we compute expm1(x) by
+ *    (i)   if x < -56*ln2, return -1.0, (raise inexact if x!=inf)
+ *    (ii)  if k=0, return r-E
+ *    (iii) if k=-1, return 0.5*(r-E)-0.5
+ *        (iv)  if k=1 if r < -0.25, return 2*((r+0.5)- E)
+ *                  else       return  1.0+2.0*(r-E);
+ *    (v)   if (k<-2||k>56) return 2^k(1-(E-r)) - 1 (or exp(x)-1)
+ *    (vi)  if k <= 20, return 2^k((1-2^-k)-(E-r)), else
+ *    (vii) return 2^k(1-((E+2^-k)-r))
+ *
+ * Special cases:
+ *  expm1(INF) is INF, expm1(NaN) is NaN;
+ *  expm1(-INF) is -1, and
+ *  for finite argument, only expm1(0)=0 is exact.
+ *
+ * Accuracy:
+ *  according to an error analysis, the error is always less than
+ *  1 ulp (unit in the last place).
+ *
+ * Misc. info.
+ *  For IEEE double
+ *      if x >  7.09782712893383973096e+02 then expm1(x) overflow
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+double expm1(double x) {
+  static const double
+      one = 1.0,
+      tiny = 1.0e-300,
+      o_threshold = 7.09782712893383973096e+02, /* 0x40862E42, 0xFEFA39EF */
+      ln2_hi = 6.93147180369123816490e-01,      /* 0x3fe62e42, 0xfee00000 */
+      ln2_lo = 1.90821492927058770002e-10,      /* 0x3dea39ef, 0x35793c76 */
+      invln2 = 1.44269504088896338700e+00,      /* 0x3ff71547, 0x652b82fe */
+      /* Scaled Q's: Qn_here = 2**n * Qn_above, for R(2*z) where z = hxs =
+         x*x/2: */
+      Q1 = -3.33333333333331316428e-02, /* BFA11111 111110F4 */
+      Q2 = 1.58730158725481460165e-03,  /* 3F5A01A0 19FE5585 */
+      Q3 = -7.93650757867487942473e-05, /* BF14CE19 9EAADBB7 */
+      Q4 = 4.00821782732936239552e-06,  /* 3ED0CFCA 86E65239 */
+      Q5 = -2.01099218183624371326e-07; /* BE8AFDB7 6E09C32D */
+
+  /* volatile keeps the compiler from constant-folding the overflow and
+     inexact-raising expressions involving huge below. */
+  static volatile double huge = 1.0e+300;
+
+  double y, hi, lo, c, t, e, hxs, hfx, r1, twopk;
+  int32_t k, xsb;
+  uint32_t hx;
+
+  GET_HIGH_WORD(hx, x);
+  xsb = hx & 0x80000000; /* sign bit of x */
+  hx &= 0x7fffffff;      /* high word of |x| */
+
+  /* filter out huge and non-finite argument */
+  if (hx >= 0x4043687A) {   /* if |x|>=56*ln2 */
+    if (hx >= 0x40862E42) { /* if |x|>=709.78... */
+      if (hx >= 0x7ff00000) {
+        uint32_t low;
+        GET_LOW_WORD(low, x);
+        if (((hx & 0xfffff) | low) != 0)
+          return x + x; /* NaN */
+        else
+          return (xsb == 0) ? x : -1.0; /* exp(+-inf)={inf,-1} */
+      }
+      if (x > o_threshold) return huge * huge; /* overflow */
+    }
+    if (xsb != 0) {        /* x < -56*ln2, return -1.0 with inexact */
+      if (x + tiny < 0.0)  /* raise inexact */
+        return tiny - one; /* return -1 */
+    }
+  }
+
+  /* argument reduction */
+  if (hx > 0x3fd62e42) {   /* if  |x| > 0.5 ln2 */
+    if (hx < 0x3FF0A2B2) { /* and |x| < 1.5 ln2 */
+      if (xsb == 0) {
+        hi = x - ln2_hi;
+        lo = ln2_lo;
+        k = 1;
+      } else {
+        hi = x + ln2_hi;
+        lo = -ln2_lo;
+        k = -1;
+      }
+    } else {
+      /* k = x/ln2 rounded to nearest; the +-0.5 supplies the bias before
+         the implicit truncating double->int conversion. */
+      k = invln2 * x + ((xsb == 0) ? 0.5 : -0.5);
+      t = k;
+      hi = x - t * ln2_hi; /* t*ln2_hi is exact here */
+      lo = t * ln2_lo;
+    }
+    STRICT_ASSIGN(double, x, hi - lo);
+    c = (hi - x) - lo; /* rounding error of the reduction, folded in below */
+  } else if (hx < 0x3c900000) { /* when |x|<2**-54, return x */
+    t = huge + x;               /* return x with inexact flags when x!=0 */
+    return x - (t - (huge + x));
+  } else {
+    k = 0;
+  }
+
+  /* x is now in primary range */
+  hfx = 0.5 * x;
+  hxs = x * hfx;
+  r1 = one + hxs * (Q1 + hxs * (Q2 + hxs * (Q3 + hxs * (Q4 + hxs * Q5))));
+  t = 3.0 - r1 * hfx;
+  e = hxs * ((r1 - t) / (6.0 - x * t));
+  if (k == 0) {
+    return x - (x * e - hxs); /* c is 0 */
+  } else {
+    /* twopk = 2^k via direct exponent construction; for k == 1024 this
+       yields the bit pattern of +Inf, which the k > 56 branch below
+       special-cases instead of using twopk. */
+    INSERT_WORDS(twopk, 0x3ff00000 + (k << 20), 0); /* 2^k */
+    e = (x * (e - c) - c);
+    e -= hxs;
+    if (k == -1) return 0.5 * (x - e) - 0.5;
+    if (k == 1) {
+      if (x < -0.25)
+        return -2.0 * (e - (x + 0.5));
+      else
+        return one + 2.0 * (x - e);
+    }
+    if (k <= -2 || k > 56) { /* suffice to return exp(x)-1 */
+      y = one - (e - x);
+      // TODO(mvstanton): is this replacement for the hex float
+      // sufficient?
+      // if (k == 1024) y = y*2.0*0x1p1023;
+      if (k == 1024)
+        y = y * 2.0 * 8.98846567431158e+307;
+      else
+        y = y * twopk;
+      return y - one;
+    }
+    t = one;
+    if (k < 20) {
+      SET_HIGH_WORD(t, 0x3ff00000 - (0x200000 >> k)); /* t=1-2^-k */
+      y = t - (e - x);
+      y = y * twopk;
+    } else {
+      SET_HIGH_WORD(t, ((0x3ff - k) << 20)); /* 2^-k */
+      y = x - (e + t);
+      y += one;
+      y = y * twopk;
+    }
+  }
+  return y;
+}
+
+double cbrt(double x) {
+  static const uint32_t
+      B1 = 715094163, /* B1 = (1023-1023/3-0.03306235651)*2**20 */
+      B2 = 696219795; /* B2 = (1023-1023/3-54/3-0.03306235651)*2**20 */
+
+  /* |1/cbrt(x) - p(x)| < 2**-23.5 (~[-7.93e-8, 7.929e-8]). */
+  static const double P0 = 1.87595182427177009643, /* 0x3ffe03e6, 0x0f61e692 */
+      P1 = -1.88497979543377169875,                /* 0xbffe28e0, 0x92f02420 */
+      P2 = 1.621429720105354466140,                /* 0x3ff9f160, 0x4a49d6c2 */
+      P3 = -0.758397934778766047437,               /* 0xbfe844cb, 0xbee751d9 */
+      P4 = 0.145996192886612446982;                /* 0x3fc2b000, 0xd4e4edd7 */
+
+  int32_t hx;
+  union {
+    double value;
+    uint64_t bits;
+  } u;
+  double r, s, t = 0.0, w;
+  uint32_t sign;
+  uint32_t high, low;
+
+  EXTRACT_WORDS(hx, low, x);
+  sign = hx & 0x80000000; /* sign= sign(x) */
+  hx ^= sign;
+  if (hx >= 0x7ff00000) return (x + x); /* cbrt(NaN,INF) is itself */
+
+  /*
+   * Rough cbrt to 5 bits:
+   *    cbrt(2**e*(1+m) ~= 2**(e/3)*(1+(e%3+m)/3)
+   * where e is integral and >= 0, m is real and in [0, 1), and "/" and
+   * "%" are integer division and modulus with rounding towards minus
+   * infinity.  The RHS is always >= the LHS and has a maximum relative
+   * error of about 1 in 16.  Adding a bias of -0.03306235651 to the
+   * (e%3+m)/3 term reduces the error to about 1 in 32. With the IEEE
+   * floating point representation, for finite positive normal values,
+   * ordinary integer division of the value in bits magically gives
+   * almost exactly the RHS of the above provided we first subtract the
+   * exponent bias (1023 for doubles) and later add it back.  We do the
+   * subtraction virtually to keep e >= 0 so that ordinary integer
+   * division rounds towards minus infinity; this is also efficient.
+   */
+  if (hx < 0x00100000) {             /* zero or subnormal? */
+    if ((hx | low) == 0) return (x); /* cbrt(0) is itself */
+    SET_HIGH_WORD(t, 0x43500000);    /* set t= 2**54 */
+    t *= x;
+    GET_HIGH_WORD(high, t);
+    INSERT_WORDS(t, sign | ((high & 0x7fffffff) / 3 + B2), 0);
+  } else {
+    INSERT_WORDS(t, sign | (hx / 3 + B1), 0);
+  }
+
+  /*
+   * New cbrt to 23 bits:
+   *    cbrt(x) = t*cbrt(x/t**3) ~= t*P(t**3/x)
+   * where P(r) is a polynomial of degree 4 that approximates 1/cbrt(r)
+   * to within 2**-23.5 when |r - 1| < 1/10.  The rough approximation
+   * has produced t such that |t/cbrt(x) - 1| ~< 1/32, and cubing this
+   * gives us bounds for r = t**3/x.
+   *
+   * Try to optimize for parallel evaluation as in k_tanf.c.
+   */
+  r = (t * t) * (t / x);
+  t = t * ((P0 + r * (P1 + r * P2)) + ((r * r) * r) * (P3 + r * P4));
+
+  /*
+   * Round t away from zero to 23 bits (sloppily except for ensuring that
+   * the result is larger in magnitude than cbrt(x) but not much more than
+   * 2 23-bit ulps larger).  With rounding towards zero, the error bound
+   * would be ~5/6 instead of ~4/6.  With a maximum error of 2 23-bit ulps
+   * in the rounded t, the infinite-precision error in the Newton
+   * approximation barely affects third digit in the final error
+   * 0.667; the error in the rounded t can be up to about 3 23-bit ulps
+   * before the final error is larger than 0.667 ulps.
+   */
+  u.value = t;
+  u.bits = (u.bits + 0x80000000) & 0xffffffffc0000000ULL;
+  t = u.value;
+
+  /* one step Newton iteration to 53 bits with error < 0.667 ulps */
+  s = t * t;             /* t*t is exact */
+  r = x / s;             /* error <= 0.5 ulps; |r| < |t| */
+  w = t + t;             /* t+t is exact */
+  r = (r - t) / (w + r); /* r-t is exact; w+r ~= 3*t */
+  t = t + t * r;         /* error <= 0.5 + 0.5/3 + epsilon */
+
+  return (t);
+}
+
+/* sin(x)
+ * Return sine function of x.
+ *
+ * kernel function:
+ *      __kernel_sin            ... sine function on [-pi/4,pi/4]
+ *      __kernel_cos            ... cosine function on [-pi/4,pi/4]
+ *      __ieee754_rem_pio2      ... argument reduction routine
+ *
+ * Method.
+ *      Let S,C and T denote the sin, cos and tan respectively on
+ *      [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
+ *      in [-pi/4 , +pi/4], and let n = k mod 4.
+ *      We have
+ *
+ *          n        sin(x)      cos(x)        tan(x)
+ *     ----------------------------------------------------------
+ *          0          S           C             T
+ *          1          C          -S            -1/T
+ *          2         -S          -C             T
+ *          3         -C           S            -1/T
+ *     ----------------------------------------------------------
+ *
+ * Special cases:
+ *      Let trig be any of sin, cos, or tan.
+ *      trig(+-INF)  is NaN, with signals;
+ *      trig(NaN)    is that NaN;
+ *
+ * Accuracy:
+ *      TRIG(x) returns trig(x) nearly rounded
+ */
+double sin(double x) {
+  double y[2], z = 0.0;
+  int32_t n, ix;
+
+  /* High word of x. */
+  GET_HIGH_WORD(ix, x);
+
+  /* |x| ~< pi/4 */
+  ix &= 0x7fffffff;
+  if (ix <= 0x3fe921fb) {
+    return __kernel_sin(x, z, 0);
+  } else if (ix >= 0x7ff00000) {
+    /* sin(Inf or NaN) is NaN */
+    return x - x;
+  } else {
+    /* argument reduction needed */
+    n = __ieee754_rem_pio2(x, y);
+    switch (n & 3) {
+      case 0:
+        return __kernel_sin(y[0], y[1], 1);
+      case 1:
+        return __kernel_cos(y[0], y[1]);
+      case 2:
+        return -__kernel_sin(y[0], y[1], 1);
+      default:
+        return -__kernel_cos(y[0], y[1]);
+    }
+  }
+}
+
+/* tan(x)
+ * Return tangent function of x.
+ *
+ * kernel function:
+ *      __kernel_tan            ... tangent function on [-pi/4,pi/4]
+ *      __ieee754_rem_pio2      ... argument reduction routine
+ *
+ * Method.
+ *      Let S,C and T denote the sin, cos and tan respectively on
+ *      [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
+ *      in [-pi/4 , +pi/4], and let n = k mod 4.
+ *      We have
+ *
+ *          n        sin(x)      cos(x)        tan(x)
+ *     ----------------------------------------------------------
+ *          0          S           C             T
+ *          1          C          -S            -1/T
+ *          2         -S          -C             T
+ *          3         -C           S            -1/T
+ *     ----------------------------------------------------------
+ *
+ * Special cases:
+ *      Let trig be any of sin, cos, or tan.
+ *      trig(+-INF)  is NaN, with signals;
+ *      trig(NaN)    is that NaN;
+ *
+ * Accuracy:
+ *      TRIG(x) returns trig(x) nearly rounded
+ */
+double tan(double x) {
+  double y[2], z = 0.0;
+  int32_t n, ix;
+
+  /* High word of x. */
+  GET_HIGH_WORD(ix, x);
+
+  /* |x| ~< pi/4 */
+  ix &= 0x7fffffff;
+  if (ix <= 0x3fe921fb) {
+    return __kernel_tan(x, z, 1);
+  } else if (ix >= 0x7ff00000) {
+    /* tan(Inf or NaN) is NaN */
+    return x - x; /* NaN */
+  } else {
+    /* argument reduction needed */
+    n = __ieee754_rem_pio2(x, y);
+    /* 1 -> n even, -1 -> n odd */
+    return __kernel_tan(y[0], y[1], 1 - ((n & 1) << 1));
+  }
+}
+
+}  // namespace ieee754
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/ieee754.h b/src/base/ieee754.h
new file mode 100644
index 0000000..cf33580
--- /dev/null
+++ b/src/base/ieee754.h
@@ -0,0 +1,57 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_IEEE754_H_
+#define V8_BASE_IEEE754_H_
+
+namespace v8 {
+namespace base {
+namespace ieee754 {
+
+// Returns the principal value of the arc tangent of |x|; that is the value
+// whose tangent is |x|.
+double atan(double x);
+
+// Returns the principal value of the arc tangent of |y/x|, using the signs of
+// the two arguments to determine the quadrant of the result.
+double atan2(double y, double x);
+
+// Returns the cosine of |x|, where |x| is given in radians.
+double cos(double x);
+
+// Returns the base-e exponential of |x|.
+double exp(double x);
+
+double atanh(double x);
+
+// Returns the natural logarithm of |x|.
+double log(double x);
+
+// Returns a value equivalent to |log(1+x)|, but computed in a way that is
+// accurate even if the value of |x| is near zero.
+double log1p(double x);
+
+// Returns the base 2 logarithm of |x|.
+double log2(double x);
+
+// Returns the base 10 logarithm of |x|.
+double log10(double x);
+
+// Returns the cube root of |x|.
+double cbrt(double x);
+
+// Returns exp(x)-1, the exponential of |x| minus 1.
+double expm1(double x);
+
+// Returns the sine of |x|, where |x| is given in radians.
+double sin(double x);
+
+// Returns the tangent of |x|, where |x| is given in radians.
+double tan(double x);
+
+}  // namespace ieee754
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_IEEE754_H_
diff --git a/src/base/platform/semaphore.cc b/src/base/platform/semaphore.cc
index a7e522a..7bf5986 100644
--- a/src/base/platform/semaphore.cc
+++ b/src/base/platform/semaphore.cc
@@ -87,10 +87,6 @@
       0, reinterpret_cast<uintptr_t>(&native_handle_) &
       kSemaphoreAlignmentMask);
   DCHECK(count >= 0);
-#if V8_LIBC_GLIBC
-  // sem_init in glibc prior to 2.1 does not zero out semaphores.
-  memset(&native_handle_, 0, sizeof(native_handle_));
-#endif
   int result = sem_init(&native_handle_, 0, count);
   DCHECK_EQ(0, result);
   USE(result);
@@ -105,6 +101,9 @@
 
 void Semaphore::Signal() {
   int result = sem_post(&native_handle_);
+  // This check may fail with <libc-2.21, which we use on the try bots, if the
+  // semaphore is destroyed while sem_post is still executed. A workaround is
+  // to extend the lifetime of the semaphore.
   CHECK_EQ(0, result);
 }
 
diff --git a/src/base/platform/time.cc b/src/base/platform/time.cc
index b2355a3..786ef2e 100644
--- a/src/base/platform/time.cc
+++ b/src/base/platform/time.cc
@@ -41,9 +41,11 @@
   CHECK(kr == KERN_SUCCESS);
 
   v8::base::CheckedNumeric<int64_t> absolute_micros(
-      thread_info_data.user_time.seconds);
+      thread_info_data.user_time.seconds +
+      thread_info_data.system_time.seconds);
   absolute_micros *= v8::base::Time::kMicrosecondsPerSecond;
-  absolute_micros += thread_info_data.user_time.microseconds;
+  absolute_micros += (thread_info_data.user_time.microseconds +
+                      thread_info_data.system_time.microseconds);
   return absolute_micros.ValueOrDie();
 }
 #elif V8_OS_POSIX
@@ -51,9 +53,20 @@
 // microsecond timebase. Minimum requirement is MONOTONIC_CLOCK to be supported
 // on the system. FreeBSD 6 has CLOCK_MONOTONIC but defines
 // _POSIX_MONOTONIC_CLOCK to -1.
-inline int64_t ClockNow(clockid_t clk_id) {
+V8_INLINE int64_t ClockNow(clockid_t clk_id) {
 #if (defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0) || \
   defined(V8_OS_BSD) || defined(V8_OS_ANDROID)
+// On AIX clock_gettime for CLOCK_THREAD_CPUTIME_ID outputs time with
+// resolution of 10ms. thread_cputime API provides the time in ns
+#if defined(V8_OS_AIX)
+  thread_cputime_t tc;
+  if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
+    if (thread_cputime(-1, &tc) != 0) {
+      UNREACHABLE();
+      return 0;
+    }
+  }
+#endif
   struct timespec ts;
   if (clock_gettime(clk_id, &ts) != 0) {
     UNREACHABLE();
@@ -61,12 +74,38 @@
   }
   v8::base::internal::CheckedNumeric<int64_t> result(ts.tv_sec);
   result *= v8::base::Time::kMicrosecondsPerSecond;
+#if defined(V8_OS_AIX)
+  if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
+    result += (tc.stime / v8::base::Time::kNanosecondsPerMicrosecond);
+  } else {
+    result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
+  }
+#else
   result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
+#endif
   return result.ValueOrDie();
 #else  // Monotonic clock not supported.
   return 0;
 #endif
 }
+#elif V8_OS_WIN
+V8_INLINE bool IsQPCReliable() {
+  v8::base::CPU cpu;
+  // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is unreliable.
+  return strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15;
+}
+
+// Returns the current value of the performance counter.
+V8_INLINE uint64_t QPCNowRaw() {
+  LARGE_INTEGER perf_counter_now = {};
+  // According to the MSDN documentation for QueryPerformanceCounter(), this
+  // will never fail on systems that run XP or later.
+  // https://msdn.microsoft.com/library/windows/desktop/ms644904.aspx
+  BOOL result = ::QueryPerformanceCounter(&perf_counter_now);
+  DCHECK(result);
+  USE(result);
+  return perf_counter_now.QuadPart;
+}
 #endif  // V8_OS_MACOSX
 
 
@@ -456,15 +495,12 @@
   virtual ~HighResolutionTickClock() {}
 
   int64_t Now() override {
-    LARGE_INTEGER now;
-    BOOL result = QueryPerformanceCounter(&now);
-    DCHECK(result);
-    USE(result);
+    uint64_t now = QPCNowRaw();
 
     // Intentionally calculate microseconds in a round about manner to avoid
     // overflow and precision issues. Think twice before simplifying!
-    int64_t whole_seconds = now.QuadPart / ticks_per_second_;
-    int64_t leftover_ticks = now.QuadPart % ticks_per_second_;
+    int64_t whole_seconds = now / ticks_per_second_;
+    int64_t leftover_ticks = now % ticks_per_second_;
     int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) +
         ((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_);
 
@@ -529,10 +565,8 @@
       return tick_clock.Pointer();
     }
 
-    // On Athlon X2 CPUs (e.g. model 15) the QueryPerformanceCounter
-    // is unreliable, fallback to the low-resolution tick clock.
-    CPU cpu;
-    if (strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15) {
+    // If QPC is not reliable, fall back to the low-resolution tick clock.
+    if (IsQPCReliable()) {
       return tick_clock.Pointer();
     }
 
@@ -621,11 +655,106 @@
 #elif(defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
   defined(V8_OS_ANDROID)
   return ThreadTicks(ClockNow(CLOCK_THREAD_CPUTIME_ID));
+#elif V8_OS_WIN
+  return ThreadTicks::GetForThread(::GetCurrentThread());
 #else
   UNREACHABLE();
   return ThreadTicks();
 #endif
 }
 
+
+#if V8_OS_WIN
+ThreadTicks ThreadTicks::GetForThread(const HANDLE& thread_handle) {
+  DCHECK(IsSupported());
+
+  // Get the number of TSC ticks used by the current thread.
+  ULONG64 thread_cycle_time = 0;
+  ::QueryThreadCycleTime(thread_handle, &thread_cycle_time);
+
+  // Get the frequency of the TSC.
+  double tsc_ticks_per_second = TSCTicksPerSecond();
+  if (tsc_ticks_per_second == 0)
+    return ThreadTicks();
+
+  // Return the CPU time of the current thread.
+  double thread_time_seconds = thread_cycle_time / tsc_ticks_per_second;
+  return ThreadTicks(
+      static_cast<int64_t>(thread_time_seconds * Time::kMicrosecondsPerSecond));
+}
+
+// static
+bool ThreadTicks::IsSupportedWin() {
+  static bool is_supported = base::CPU().has_non_stop_time_stamp_counter() &&
+                             !IsQPCReliable();
+  return is_supported;
+}
+
+// static
+void ThreadTicks::WaitUntilInitializedWin() {
+  while (TSCTicksPerSecond() == 0)
+    ::Sleep(10);
+}
+
+double ThreadTicks::TSCTicksPerSecond() {
+  DCHECK(IsSupported());
+
+  // The value returned by QueryPerformanceFrequency() cannot be used as the TSC
+  // frequency, because there is no guarantee that the TSC frequency is equal to
+  // the performance counter frequency.
+
+  // The TSC frequency is cached in a static variable because it takes some time
+  // to compute it.
+  static double tsc_ticks_per_second = 0;
+  if (tsc_ticks_per_second != 0)
+    return tsc_ticks_per_second;
+
+  // Increase the thread priority to reduce the chances of having a context
+  // switch during a reading of the TSC and the performance counter.
+  int previous_priority = ::GetThreadPriority(::GetCurrentThread());
+  ::SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
+
+  // The first time that this function is called, make an initial reading of the
+  // TSC and the performance counter.
+  static const uint64_t tsc_initial = __rdtsc();
+  static const uint64_t perf_counter_initial = QPCNowRaw();
+
+  // Make another reading of the TSC and the performance counter every time
+  // that this function is called.
+  uint64_t tsc_now = __rdtsc();
+  uint64_t perf_counter_now = QPCNowRaw();
+
+  // Reset the thread priority.
+  ::SetThreadPriority(::GetCurrentThread(), previous_priority);
+
+  // Make sure that at least 50 ms elapsed between the 2 readings. The first
+  // time that this function is called, we don't expect this to be the case.
+  // Note: The longer the elapsed time between the 2 readings is, the more
+  //   accurate the computed TSC frequency will be. The 50 ms value was
+  //   chosen because local benchmarks show that it allows us to get a
+  //   stddev of less than 1 tick/us between multiple runs.
+  // Note: According to the MSDN documentation for QueryPerformanceFrequency(),
+  //   this will never fail on systems that run XP or later.
+  //   https://msdn.microsoft.com/library/windows/desktop/ms644905.aspx
+  LARGE_INTEGER perf_counter_frequency = {};
+  ::QueryPerformanceFrequency(&perf_counter_frequency);
+  DCHECK_GE(perf_counter_now, perf_counter_initial);
+  uint64_t perf_counter_ticks = perf_counter_now - perf_counter_initial;
+  double elapsed_time_seconds =
+      perf_counter_ticks / static_cast<double>(perf_counter_frequency.QuadPart);
+
+  const double kMinimumEvaluationPeriodSeconds = 0.05;
+  if (elapsed_time_seconds < kMinimumEvaluationPeriodSeconds)
+    return 0;
+
+  // Compute the frequency of the TSC.
+  DCHECK_GE(tsc_now, tsc_initial);
+  uint64_t tsc_ticks = tsc_now - tsc_initial;
+  tsc_ticks_per_second = tsc_ticks / elapsed_time_seconds;
+
+  return tsc_ticks_per_second;
+}
+#endif  // V8_OS_WIN
+
 }  // namespace base
 }  // namespace v8
diff --git a/src/base/platform/time.h b/src/base/platform/time.h
index e17fc1d..be62014 100644
--- a/src/base/platform/time.h
+++ b/src/base/platform/time.h
@@ -12,6 +12,9 @@
 #include "src/base/bits.h"
 #include "src/base/macros.h"
 #include "src/base/safe_math.h"
+#if V8_OS_WIN
+#include "src/base/win32-headers.h"
+#endif
 
 // Forward declarations.
 extern "C" {
@@ -380,17 +383,42 @@
   // Returns true if ThreadTicks::Now() is supported on this system.
   static bool IsSupported();
 
+  // Waits until the initialization is completed. Needs to be guarded with a
+  // call to IsSupported().
+  static void WaitUntilInitialized() {
+#if V8_OS_WIN
+    WaitUntilInitializedWin();
+#endif
+  }
+
   // Returns thread-specific CPU-time on systems that support this feature.
   // Needs to be guarded with a call to IsSupported(). Use this timer
   // to (approximately) measure how much time the calling thread spent doing
   // actual work vs. being de-scheduled. May return bogus results if the thread
   // migrates to another CPU between two calls. Returns an empty ThreadTicks
-  // object until the initialization is completed.
+  // object until the initialization is completed. If a clock reading is
+  // absolutely needed, call WaitUntilInitialized() before this method.
   static ThreadTicks Now();
 
+#if V8_OS_WIN
+  // Similar to Now() above except this returns thread-specific CPU time for an
+  // arbitrary thread. All comments for Now() method above apply to this
+  // method as well.
+  static ThreadTicks GetForThread(const HANDLE& thread_handle);
+#endif
+
  private:
-  // This is for internal use and testing. Ticks are in microseconds.
+  // Please use Now() or GetForThread() to create a new object. This is for
+  // internal use and testing. Ticks are in microseconds.
   explicit ThreadTicks(int64_t ticks) : TimeBase(ticks) {}
+
+#if V8_OS_WIN
+  // Returns the frequency of the TSC in ticks per second, or 0 if it hasn't
+  // been measured yet. Needs to be guarded with a call to IsSupported().
+  static double TSCTicksPerSecond();
+  static bool IsSupportedWin();
+  static void WaitUntilInitializedWin();
+#endif
 };
 
 }  // namespace base
diff --git a/src/base/utils/random-number-generator.cc b/src/base/utils/random-number-generator.cc
index ff42840..3a6f2c6 100644
--- a/src/base/utils/random-number-generator.cc
+++ b/src/base/utils/random-number-generator.cc
@@ -124,10 +124,10 @@
 
 
 void RandomNumberGenerator::SetSeed(int64_t seed) {
-  if (seed == 0) seed = 1;
   initial_seed_ = seed;
   state0_ = MurmurHash3(bit_cast<uint64_t>(seed));
-  state1_ = MurmurHash3(state0_);
+  state1_ = MurmurHash3(~state0_);
+  CHECK(state0_ != 0 || state1_ != 0);
 }
 
 
diff --git a/src/base/win32-headers.h b/src/base/win32-headers.h
index 20ec8e0..b61ce71 100644
--- a/src/base/win32-headers.h
+++ b/src/base/win32-headers.h
@@ -27,10 +27,10 @@
 #ifndef NOMCX
 #define NOMCX
 #endif
-// Require Windows XP or higher (this is required for the RtlCaptureContext
-// function to be present).
+// Require Windows Vista or higher (this is required for the
+// QueryThreadCycleTime function to be present).
 #ifndef _WIN32_WINNT
-#define _WIN32_WINNT 0x501
+#define _WIN32_WINNT 0x0600
 #endif
 
 #include <windows.h>
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 70eec2b..0be96ea 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -6,6 +6,7 @@
 
 #include "src/accessors.h"
 #include "src/api-natives.h"
+#include "src/base/ieee754.h"
 #include "src/code-stubs.h"
 #include "src/extensions/externalize-string-extension.h"
 #include "src/extensions/free-buffer-extension.h"
@@ -31,17 +32,15 @@
 Handle<String> Bootstrapper::SourceLookup(int index) {
   DCHECK(0 <= index && index < Source::GetBuiltinsCount());
   Heap* heap = isolate_->heap();
-  if (Source::GetSourceCache(heap)->get(index)->IsUndefined()) {
+  if (Source::GetSourceCache(heap)->get(index)->IsUndefined(isolate_)) {
     // We can use external strings for the natives.
     Vector<const char> source = Source::GetScriptSource(index);
     NativesExternalStringResource* resource =
         new NativesExternalStringResource(source.start(), source.length());
-    // We do not expect this to throw an exception. Change this if it does.
-    Handle<String> source_code = isolate_->factory()
-                                     ->NewExternalStringFromOneByte(resource)
-                                     .ToHandleChecked();
+    Handle<ExternalOneByteString> source_code =
+        isolate_->factory()->NewNativeSourceString(resource);
     // Mark this external string with a special map.
-    source_code->set_map(isolate_->heap()->native_source_string_map());
+    DCHECK(source_code->is_short());
     Source::GetSourceCache(heap)->set(index, *source_code);
   }
   Handle<Object> cached_source(Source::GetSourceCache(heap)->get(index),
@@ -111,9 +110,10 @@
 void DeleteNativeSources(Object* maybe_array) {
   if (maybe_array->IsFixedArray()) {
     FixedArray* array = FixedArray::cast(maybe_array);
+    Isolate* isolate = array->GetIsolate();
     for (int i = 0; i < array->length(); i++) {
       Object* natives_source = array->get(i);
-      if (!natives_source->IsUndefined()) {
+      if (!natives_source->IsUndefined(isolate)) {
         const NativesExternalStringResource* resource =
             reinterpret_cast<const NativesExternalStringResource*>(
                 ExternalOneByteString::cast(natives_source)->resource());
@@ -139,7 +139,7 @@
  public:
   Genesis(Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
           v8::Local<v8::ObjectTemplate> global_proxy_template,
-          v8::ExtensionConfiguration* extensions,
+          v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
           GlobalContextType context_type);
   ~Genesis() { }
 
@@ -167,7 +167,7 @@
   void CreateJSProxyMaps();
 
   // Make the "arguments" and "caller" properties throw a TypeError on access.
-  void AddRestrictedFunctionProperties(Handle<Map> map);
+  void AddRestrictedFunctionProperties(Handle<JSFunction> empty);
 
   // Creates the global objects using the global proxy and the template passed
   // in through the API.  We call this regardless of whether we are building a
@@ -237,7 +237,7 @@
     void set_state(RegisteredExtension* extension,
                    ExtensionTraversalState state);
    private:
-    HashMap map_;
+    base::HashMap map_;
     DISALLOW_COPY_AND_ASSIGN(ExtensionStates);
   };
 
@@ -325,13 +325,13 @@
 Handle<Context> Bootstrapper::CreateEnvironment(
     MaybeHandle<JSGlobalProxy> maybe_global_proxy,
     v8::Local<v8::ObjectTemplate> global_proxy_template,
-    v8::ExtensionConfiguration* extensions, GlobalContextType context_type) {
+    v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
+    GlobalContextType context_type) {
   HandleScope scope(isolate_);
   Genesis genesis(isolate_, maybe_global_proxy, global_proxy_template,
-                  extensions, context_type);
+                  extensions, context_snapshot_index, context_type);
   Handle<Context> env = genesis.result();
-  if (env.is_null() ||
-      (context_type != THIN_CONTEXT && !InstallExtensions(env, extensions))) {
+  if (env.is_null() || !InstallExtensions(env, extensions)) {
     return Handle<Context>();
   }
   return scope.CloseAndEscape(env);
@@ -380,6 +380,16 @@
   InstallFunction(target, name, function, name_string, attributes);
 }
 
+Handle<JSFunction> InstallGetter(Handle<JSObject> target,
+                                 Handle<Name> property_name,
+                                 Handle<JSFunction> getter,
+                                 PropertyAttributes attributes = DONT_ENUM) {
+  Handle<Object> setter = target->GetIsolate()->factory()->undefined_value();
+  JSObject::DefineAccessor(target, property_name, getter, setter, attributes)
+      .Check();
+  return getter;
+}
+
 Handle<JSFunction> CreateFunction(Isolate* isolate, Handle<String> name,
                                   InstanceType type, int instance_size,
                                   MaybeHandle<JSObject> maybe_prototype,
@@ -388,12 +398,9 @@
   Factory* factory = isolate->factory();
   Handle<Code> call_code(isolate->builtins()->builtin(call));
   Handle<JSObject> prototype;
-  static const bool kReadOnlyPrototype = false;
-  static const bool kInstallConstructor = false;
   return maybe_prototype.ToHandle(&prototype)
              ? factory->NewFunction(name, call_code, prototype, type,
-                                    instance_size, kReadOnlyPrototype,
-                                    kInstallConstructor, strict_function_map)
+                                    instance_size, strict_function_map)
              : factory->NewFunctionWithoutPrototype(name, call_code,
                                                     strict_function_map);
 }
@@ -457,6 +464,27 @@
                                len, adapt);
 }
 
+Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
+                                       Handle<String> name, Builtins::Name call,
+                                       bool adapt) {
+  Isolate* const isolate = base->GetIsolate();
+  Handle<String> fun_name =
+      Name::ToFunctionName(name, isolate->factory()->get_string())
+          .ToHandleChecked();
+  Handle<JSFunction> fun =
+      SimpleCreateFunction(isolate, fun_name, call, 0, adapt);
+  InstallGetter(base, name, fun);
+  return fun;
+}
+
+Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
+                                       Handle<String> name, Builtins::Name call,
+                                       bool adapt, BuiltinFunctionId id) {
+  Handle<JSFunction> fun = SimpleInstallGetter(base, name, call, adapt);
+  fun->shared()->set_builtin_function_id(id);
+  return fun;
+}
+
 }  // namespace
 
 void Genesis::SetFunctionInstanceDescriptor(Handle<Map> map,
@@ -469,6 +497,7 @@
   PropertyAttributes roc_attribs =
       static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
 
+  STATIC_ASSERT(JSFunction::kLengthDescriptorIndex == 0);
   Handle<AccessorInfo> length =
       Accessors::FunctionLengthInfo(isolate(), roc_attribs);
   {  // Add length.
@@ -476,6 +505,8 @@
                                  length, roc_attribs);
     map->AppendDescriptor(&d);
   }
+
+  STATIC_ASSERT(JSFunction::kNameDescriptorIndex == 1);
   Handle<AccessorInfo> name =
       Accessors::FunctionNameInfo(isolate(), ro_attribs);
   {  // Add name.
@@ -611,9 +642,6 @@
   Map::SetPrototype(sloppy_function_without_prototype_map, empty_function);
   Map::SetPrototype(sloppy_function_map_writable_prototype_, empty_function);
 
-  // ES6 draft 03-17-2015, section 8.2.2 step 12
-  AddRestrictedFunctionProperties(empty_function_map);
-
   return empty_function;
 }
 
@@ -633,17 +661,20 @@
   DCHECK(function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ||
          function_mode == FUNCTION_WITH_READONLY_PROTOTYPE ||
          function_mode == FUNCTION_WITHOUT_PROTOTYPE);
+  STATIC_ASSERT(JSFunction::kLengthDescriptorIndex == 0);
   {  // Add length.
     Handle<AccessorInfo> length =
         Accessors::FunctionLengthInfo(isolate(), roc_attribs);
-    AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
-                                 length, roc_attribs);
+    AccessorConstantDescriptor d(handle(Name::cast(length->name())), length,
+                                 roc_attribs);
     map->AppendDescriptor(&d);
   }
+
+  STATIC_ASSERT(JSFunction::kNameDescriptorIndex == 1);
   {  // Add name.
     Handle<AccessorInfo> name =
         Accessors::FunctionNameInfo(isolate(), roc_attribs);
-    AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
+    AccessorConstantDescriptor d(handle(Name::cast(name->name())), name,
                                  roc_attribs);
     map->AppendDescriptor(&d);
   }
@@ -668,7 +699,7 @@
       factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("ThrowTypeError"));
   Handle<Code> code(isolate()->builtins()->builtin(builtin_name));
   Handle<JSFunction> function =
-      factory()->NewFunctionWithoutPrototype(name, code);
+      factory()->NewFunctionWithoutPrototype(name, code, true);
   function->shared()->DontAdaptArguments();
 
   // %ThrowTypeError% must not have a name property.
@@ -741,6 +772,10 @@
   // This map is installed in MakeFunctionInstancePrototypeWritable.
   strict_function_map_writable_prototype_ =
       CreateStrictFunctionMap(FUNCTION_WITH_WRITEABLE_PROTOTYPE, empty);
+
+  // Now that the strict mode function map is available, set up the
+  // restricted "arguments" and "caller" getters.
+  AddRestrictedFunctionProperties(empty);
 }
 
 void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
@@ -870,14 +905,14 @@
   descriptors->Replace(idx, &descriptor);
 }
 
-
-void Genesis::AddRestrictedFunctionProperties(Handle<Map> map) {
+void Genesis::AddRestrictedFunctionProperties(Handle<JSFunction> empty) {
   PropertyAttributes rw_attribs = static_cast<PropertyAttributes>(DONT_ENUM);
   Handle<JSFunction> thrower = GetRestrictedFunctionPropertiesThrower();
   Handle<AccessorPair> accessors = factory()->NewAccessorPair();
   accessors->set_getter(*thrower);
   accessors->set_setter(*thrower);
 
+  Handle<Map> map(empty->map());
   ReplaceAccessors(map, factory()->arguments_string(), rw_attribs, accessors);
   ReplaceAccessors(map, factory()->caller_string(), rw_attribs, accessors);
 }
@@ -885,13 +920,14 @@
 
 static void AddToWeakNativeContextList(Context* context) {
   DCHECK(context->IsNativeContext());
-  Heap* heap = context->GetIsolate()->heap();
+  Isolate* isolate = context->GetIsolate();
+  Heap* heap = isolate->heap();
 #ifdef DEBUG
   { // NOLINT
-    DCHECK(context->next_context_link()->IsUndefined());
+    DCHECK(context->next_context_link()->IsUndefined(isolate));
     // Check that context is not in the list yet.
     for (Object* current = heap->native_contexts_list();
-         !current->IsUndefined();
+         !current->IsUndefined(isolate);
          current = Context::cast(current)->next_context_link()) {
       DCHECK(current != context);
     }
@@ -966,7 +1002,7 @@
             FunctionTemplateInfo::cast(data->constructor()));
     Handle<Object> proto_template(global_constructor->prototype_template(),
                                   isolate());
-    if (!proto_template->IsUndefined()) {
+    if (!proto_template->IsUndefined(isolate())) {
       js_global_object_template =
           Handle<ObjectTemplateInfo>::cast(proto_template);
     }
@@ -1036,7 +1072,9 @@
   global_proxy->set_native_context(*native_context());
   // If we deserialized the context, the global proxy is already
   // correctly set up. Otherwise it's undefined.
-  DCHECK(native_context()->get(Context::GLOBAL_PROXY_INDEX)->IsUndefined() ||
+  DCHECK(native_context()
+             ->get(Context::GLOBAL_PROXY_INDEX)
+             ->IsUndefined(isolate()) ||
          native_context()->global_proxy() == *global_proxy);
   native_context()->set_global_proxy(*global_proxy);
 }
@@ -1180,8 +1218,15 @@
     // Setup the methods on the %FunctionPrototype%.
     SimpleInstallFunction(prototype, factory->apply_string(),
                           Builtins::kFunctionPrototypeApply, 2, false);
-    SimpleInstallFunction(prototype, factory->bind_string(),
-                          Builtins::kFunctionPrototypeBind, 1, false);
+
+    FastFunctionBindStub bind_stub(isolate);
+    Handle<JSFunction> bind_function = factory->NewFunctionWithoutPrototype(
+        factory->bind_string(), bind_stub.GetCode(), false);
+    bind_function->shared()->DontAdaptArguments();
+    bind_function->shared()->set_length(1);
+    InstallFunction(prototype, bind_function, factory->bind_string(),
+                    DONT_ENUM);
+
     SimpleInstallFunction(prototype, factory->call_string(),
                           Builtins::kFunctionPrototypeCall, 1, false);
     SimpleInstallFunction(prototype, factory->toString_string(),
@@ -1324,7 +1369,11 @@
 
     // Install the String.fromCharCode function.
     SimpleInstallFunction(string_fun, "fromCharCode",
-                          Builtins::kStringFromCharCode, 1, false);
+                          Builtins::kStringFromCharCode, 1, true);
+
+    // Install the String.fromCodePoint function.
+    SimpleInstallFunction(string_fun, "fromCodePoint",
+                          Builtins::kStringFromCodePoint, 1, false);
 
     // Create the %StringPrototype%
     Handle<JSValue> prototype =
@@ -1341,6 +1390,12 @@
                           1, true);
     SimpleInstallFunction(prototype, "charCodeAt",
                           Builtins::kStringPrototypeCharCodeAt, 1, true);
+    SimpleInstallFunction(prototype, "trim", Builtins::kStringPrototypeTrim, 0,
+                          false);
+    SimpleInstallFunction(prototype, "trimLeft",
+                          Builtins::kStringPrototypeTrimLeft, 0, false);
+    SimpleInstallFunction(prototype, "trimRight",
+                          Builtins::kStringPrototypeTrimRight, 0, false);
   }
 
   {
@@ -1470,6 +1525,8 @@
                           0, true);
     SimpleInstallFunction(prototype, "setYear", Builtins::kDatePrototypeSetYear,
                           1, false);
+    SimpleInstallFunction(prototype, "toJSON", Builtins::kDatePrototypeToJson,
+                          1, false);
 
     // Install i18n fallback functions.
     SimpleInstallFunction(prototype, "toLocaleString",
@@ -1528,7 +1585,7 @@
 
   {  // -- E r r o r
     Handle<JSFunction> error_fun = InstallFunction(
-        global, "Error", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+        global, "Error", JS_ERROR_TYPE, JSObject::kHeaderSize,
         isolate->initial_object_prototype(), Builtins::kIllegal);
     InstallWithIntrinsicDefaultProto(isolate, error_fun,
                                      Context::ERROR_FUNCTION_INDEX);
@@ -1536,7 +1593,7 @@
 
   {  // -- E v a l E r r o r
     Handle<JSFunction> eval_error_fun = InstallFunction(
-        global, "EvalError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+        global, "EvalError", JS_ERROR_TYPE, JSObject::kHeaderSize,
         isolate->initial_object_prototype(), Builtins::kIllegal);
     InstallWithIntrinsicDefaultProto(isolate, eval_error_fun,
                                      Context::EVAL_ERROR_FUNCTION_INDEX);
@@ -1544,7 +1601,7 @@
 
   {  // -- R a n g e E r r o r
     Handle<JSFunction> range_error_fun = InstallFunction(
-        global, "RangeError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+        global, "RangeError", JS_ERROR_TYPE, JSObject::kHeaderSize,
         isolate->initial_object_prototype(), Builtins::kIllegal);
     InstallWithIntrinsicDefaultProto(isolate, range_error_fun,
                                      Context::RANGE_ERROR_FUNCTION_INDEX);
@@ -1552,7 +1609,7 @@
 
   {  // -- R e f e r e n c e E r r o r
     Handle<JSFunction> reference_error_fun = InstallFunction(
-        global, "ReferenceError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+        global, "ReferenceError", JS_ERROR_TYPE, JSObject::kHeaderSize,
         isolate->initial_object_prototype(), Builtins::kIllegal);
     InstallWithIntrinsicDefaultProto(isolate, reference_error_fun,
                                      Context::REFERENCE_ERROR_FUNCTION_INDEX);
@@ -1560,7 +1617,7 @@
 
   {  // -- S y n t a x E r r o r
     Handle<JSFunction> syntax_error_fun = InstallFunction(
-        global, "SyntaxError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+        global, "SyntaxError", JS_ERROR_TYPE, JSObject::kHeaderSize,
         isolate->initial_object_prototype(), Builtins::kIllegal);
     InstallWithIntrinsicDefaultProto(isolate, syntax_error_fun,
                                      Context::SYNTAX_ERROR_FUNCTION_INDEX);
@@ -1568,7 +1625,7 @@
 
   {  // -- T y p e E r r o r
     Handle<JSFunction> type_error_fun = InstallFunction(
-        global, "TypeError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+        global, "TypeError", JS_ERROR_TYPE, JSObject::kHeaderSize,
         isolate->initial_object_prototype(), Builtins::kIllegal);
     InstallWithIntrinsicDefaultProto(isolate, type_error_fun,
                                      Context::TYPE_ERROR_FUNCTION_INDEX);
@@ -1576,7 +1633,7 @@
 
   {  // -- U R I E r r o r
     Handle<JSFunction> uri_error_fun = InstallFunction(
-        global, "URIError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+        global, "URIError", JS_ERROR_TYPE, JSObject::kHeaderSize,
         isolate->initial_object_prototype(), Builtins::kIllegal);
     InstallWithIntrinsicDefaultProto(isolate, uri_error_fun,
                                      Context::URI_ERROR_FUNCTION_INDEX);
@@ -1586,17 +1643,21 @@
   Handle<FixedArray> embedder_data = factory->NewFixedArray(3);
   native_context()->set_embedder_data(*embedder_data);
 
-  if (context_type == THIN_CONTEXT) return;
-
   {  // -- J S O N
     Handle<String> name = factory->InternalizeUtf8String("JSON");
     Handle<JSFunction> cons = factory->NewFunction(name);
     JSFunction::SetInstancePrototype(cons,
         Handle<Object>(native_context()->initial_object_prototype(), isolate));
-    cons->shared()->set_instance_class_name(*name);
     Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
     DCHECK(json_object->IsJSObject());
     JSObject::AddProperty(global, name, json_object, DONT_ENUM);
+    SimpleInstallFunction(json_object, "parse", Builtins::kJsonParse, 2, false);
+    SimpleInstallFunction(json_object, "stringify", Builtins::kJsonStringify, 3,
+                          true);
+    JSObject::AddProperty(
+        json_object, factory->to_string_tag_symbol(),
+        factory->NewStringFromAsciiChecked("JSON"),
+        static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
   }
 
   {  // -- M a t h
@@ -1605,27 +1666,64 @@
     JSFunction::SetInstancePrototype(
         cons,
         Handle<Object>(native_context()->initial_object_prototype(), isolate));
-    cons->shared()->set_instance_class_name(*name);
     Handle<JSObject> math = factory->NewJSObject(cons, TENURED);
     DCHECK(math->IsJSObject());
     JSObject::AddProperty(global, name, math, DONT_ENUM);
     SimpleInstallFunction(math, "acos", Builtins::kMathAcos, 1, true);
     SimpleInstallFunction(math, "asin", Builtins::kMathAsin, 1, true);
     SimpleInstallFunction(math, "atan", Builtins::kMathAtan, 1, true);
+    SimpleInstallFunction(math, "atan2", Builtins::kMathAtan2, 2, true);
+    SimpleInstallFunction(math, "atanh", Builtins::kMathAtanh, 1, true);
     SimpleInstallFunction(math, "ceil", Builtins::kMathCeil, 1, true);
+    SimpleInstallFunction(math, "cbrt", Builtins::kMathCbrt, 1, true);
+    SimpleInstallFunction(math, "expm1", Builtins::kMathExpm1, 1, true);
     SimpleInstallFunction(math, "clz32", Builtins::kMathClz32, 1, true);
+    SimpleInstallFunction(math, "cos", Builtins::kMathCos, 1, true);
+    Handle<JSFunction> math_exp =
+        SimpleInstallFunction(math, "exp", Builtins::kMathExp, 1, true);
+    native_context()->set_math_exp(*math_exp);
     Handle<JSFunction> math_floor =
         SimpleInstallFunction(math, "floor", Builtins::kMathFloor, 1, true);
     native_context()->set_math_floor(*math_floor);
     SimpleInstallFunction(math, "fround", Builtins::kMathFround, 1, true);
     SimpleInstallFunction(math, "imul", Builtins::kMathImul, 2, true);
+    Handle<JSFunction> math_log =
+        SimpleInstallFunction(math, "log", Builtins::kMathLog, 1, true);
+    native_context()->set_math_log(*math_log);
+    SimpleInstallFunction(math, "log1p", Builtins::kMathLog1p, 1, true);
+    SimpleInstallFunction(math, "log2", Builtins::kMathLog2, 1, true);
+    SimpleInstallFunction(math, "log10", Builtins::kMathLog10, 1, true);
     SimpleInstallFunction(math, "max", Builtins::kMathMax, 2, false);
     SimpleInstallFunction(math, "min", Builtins::kMathMin, 2, false);
     SimpleInstallFunction(math, "round", Builtins::kMathRound, 1, true);
+    SimpleInstallFunction(math, "sin", Builtins::kMathSin, 1, true);
     Handle<JSFunction> math_sqrt =
         SimpleInstallFunction(math, "sqrt", Builtins::kMathSqrt, 1, true);
     native_context()->set_math_sqrt(*math_sqrt);
+    SimpleInstallFunction(math, "tan", Builtins::kMathTan, 1, true);
     SimpleInstallFunction(math, "trunc", Builtins::kMathTrunc, 1, true);
+
+    // Install math constants.
+    double const kE = base::ieee754::exp(1.0);
+    JSObject::AddProperty(
+        math, factory->NewStringFromAsciiChecked("E"), factory->NewNumber(kE),
+        static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+    JSObject::AddProperty(
+        math, factory->NewStringFromAsciiChecked("LN10"),
+        factory->NewNumber(base::ieee754::log(10.0)),
+        static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+    JSObject::AddProperty(
+        math, factory->NewStringFromAsciiChecked("LN2"),
+        factory->NewNumber(base::ieee754::log(2.0)),
+        static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+    JSObject::AddProperty(
+        math, factory->NewStringFromAsciiChecked("LOG10E"),
+        factory->NewNumber(base::ieee754::log10(kE)),
+        static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+    JSObject::AddProperty(
+        math, factory->NewStringFromAsciiChecked("LOG2E"),
+        factory->NewNumber(base::ieee754::log2(kE)),
+        static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
   }
 
   {  // -- A r r a y B u f f e r
@@ -1635,6 +1733,36 @@
                                      Context::ARRAY_BUFFER_FUN_INDEX);
   }
 
+  {  // -- T y p e d A r r a y
+    Handle<JSObject> prototype =
+        factory->NewJSObject(isolate->object_function(), TENURED);
+    native_context()->set_typed_array_prototype(*prototype);
+
+    Handle<JSFunction> typed_array_fun =
+        CreateFunction(isolate, factory->InternalizeUtf8String("TypedArray"),
+                       JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize, prototype,
+                       Builtins::kIllegal);
+
+    // Install the "constructor" property on the {prototype}.
+    JSObject::AddProperty(prototype, factory->constructor_string(),
+                          typed_array_fun, DONT_ENUM);
+    native_context()->set_typed_array_function(*typed_array_fun);
+
+    // Install the "buffer", "byteOffset", "byteLength" and "length"
+    // getters on the {prototype}.
+    SimpleInstallGetter(prototype, factory->buffer_string(),
+                        Builtins::kTypedArrayPrototypeBuffer, false);
+    SimpleInstallGetter(prototype, factory->byte_length_string(),
+                        Builtins::kTypedArrayPrototypeByteLength, true,
+                        kTypedArrayByteLength);
+    SimpleInstallGetter(prototype, factory->byte_offset_string(),
+                        Builtins::kTypedArrayPrototypeByteOffset, true,
+                        kTypedArrayByteOffset);
+    SimpleInstallGetter(prototype, factory->length_string(),
+                        Builtins::kTypedArrayPrototypeLength, true,
+                        kTypedArrayLength);
+  }
+
   {  // -- T y p e d A r r a y s
 #define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype, size)             \
   {                                                                    \
@@ -1645,17 +1773,43 @@
   }
     TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
 #undef INSTALL_TYPED_ARRAY
+  }
 
-    Handle<JSFunction> data_view_fun = InstallFunction(
-        global, "DataView", JS_DATA_VIEW_TYPE,
-        JSDataView::kSizeWithInternalFields,
-        isolate->initial_object_prototype(), Builtins::kDataViewConstructor);
+  {  // -- D a t a V i e w
+    Handle<JSObject> prototype =
+        factory->NewJSObject(isolate->object_function(), TENURED);
+    Handle<JSFunction> data_view_fun =
+        InstallFunction(global, "DataView", JS_DATA_VIEW_TYPE,
+                        JSDataView::kSizeWithInternalFields, prototype,
+                        Builtins::kDataViewConstructor);
     InstallWithIntrinsicDefaultProto(isolate, data_view_fun,
                                      Context::DATA_VIEW_FUN_INDEX);
     data_view_fun->shared()->set_construct_stub(
         *isolate->builtins()->DataViewConstructor_ConstructStub());
     data_view_fun->shared()->set_length(3);
     data_view_fun->shared()->DontAdaptArguments();
+
+    // Install the @@toStringTag property on the {prototype}.
+    JSObject::AddProperty(
+        prototype, factory->to_string_tag_symbol(),
+        factory->NewStringFromAsciiChecked("DataView"),
+        static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+    // Install the "constructor" property on the {prototype}.
+    JSObject::AddProperty(prototype, factory->constructor_string(),
+                          data_view_fun, DONT_ENUM);
+
+    // Install the "buffer", "byteOffset" and "byteLength" getters
+    // on the {prototype}.
+    SimpleInstallGetter(prototype, factory->buffer_string(),
+                        Builtins::kDataViewPrototypeGetBuffer, false,
+                        kDataViewBuffer);
+    SimpleInstallGetter(prototype, factory->byte_length_string(),
+                        Builtins::kDataViewPrototypeGetByteLength, false,
+                        kDataViewByteLength);
+    SimpleInstallGetter(prototype, factory->byte_offset_string(),
+                        Builtins::kDataViewPrototypeGetByteOffset, false,
+                        kDataViewByteOffset);
   }
 
   {  // -- M a p
@@ -1824,7 +1978,7 @@
     function->shared()->set_instance_class_name(*arguments_string);
 
     Handle<Map> map = factory->NewMap(
-        JS_OBJECT_TYPE, JSSloppyArgumentsObject::kSize, FAST_ELEMENTS);
+        JS_ARGUMENTS_TYPE, JSSloppyArgumentsObject::kSize, FAST_ELEMENTS);
     // Create the descriptor array for the arguments object.
     Map::EnsureDescriptorSlack(map, 2);
 
@@ -1884,7 +2038,7 @@
 
     // Create the map. Allocate one in-object field for length.
     Handle<Map> map = factory->NewMap(
-        JS_OBJECT_TYPE, JSStrictArgumentsObject::kSize, FAST_ELEMENTS);
+        JS_ARGUMENTS_TYPE, JSStrictArgumentsObject::kSize, FAST_ELEMENTS);
     // Create the descriptor array for the arguments object.
     Map::EnsureDescriptorSlack(map, 3);
 
@@ -1954,13 +2108,20 @@
   }
 }  // NOLINT(readability/fn_size)
 
-
 void Genesis::InstallTypedArray(const char* name, ElementsKind elements_kind,
                                 Handle<JSFunction>* fun) {
   Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
-  Handle<JSFunction> result = InstallFunction(
-      global, name, JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize,
-      isolate()->initial_object_prototype(), Builtins::kIllegal);
+
+  Handle<JSObject> typed_array_prototype =
+      Handle<JSObject>(isolate()->typed_array_prototype());
+  Handle<JSFunction> typed_array_function =
+      Handle<JSFunction>(isolate()->typed_array_function());
+
+  Handle<JSObject> prototype =
+      factory()->NewJSObject(isolate()->object_function(), TENURED);
+  Handle<JSFunction> result =
+      InstallFunction(global, name, JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize,
+                      prototype, Builtins::kIllegal);
 
   Handle<Map> initial_map = isolate()->factory()->NewMap(
       JS_TYPED_ARRAY_TYPE,
@@ -1968,6 +2129,14 @@
       elements_kind);
   JSFunction::SetInitialMap(result, initial_map,
                             handle(initial_map->prototype(), isolate()));
+
+  CHECK(JSObject::SetPrototype(result, typed_array_function, false,
+                               Object::DONT_THROW)
+            .FromJust());
+
+  CHECK(JSObject::SetPrototype(prototype, typed_array_prototype, false,
+                               Object::DONT_THROW)
+            .FromJust());
   *fun = result;
 }
 
@@ -2191,8 +2360,6 @@
       JSObject::AddProperty(global, natives_key, utils, DONT_ENUM);
       break;
     }
-    case THIN_CONTEXT:
-      break;
   }
 
   // The utils object can be removed for cases that reach this point.
@@ -2443,6 +2610,13 @@
     }
 
     {
+      // TODO(mvstanton): Remove this when MathSinh, MathCosh and MathTanh are
+      // no longer implemented in fdlibm.js.
+      SimpleInstallFunction(container, "MathExpm1", Builtins::kMathExpm1, 1,
+                            true);
+    }
+
+    {
       PrototypeIterator iter(native_context->sloppy_async_function_map());
       Handle<JSObject> async_function_prototype(iter.GetCurrent<JSObject>());
 
@@ -2470,12 +2644,12 @@
 
       Handle<JSFunction> async_function_next =
           SimpleInstallFunction(container, "AsyncFunctionNext",
-                                Builtins::kGeneratorPrototypeNext, 2, false);
+                                Builtins::kGeneratorPrototypeNext, 1, true);
       Handle<JSFunction> async_function_throw =
           SimpleInstallFunction(container, "AsyncFunctionThrow",
-                                Builtins::kGeneratorPrototypeThrow, 2, false);
-      async_function_next->shared()->set_native(true);
-      async_function_throw->shared()->set_native(true);
+                                Builtins::kGeneratorPrototypeThrow, 1, true);
+      async_function_next->shared()->set_native(false);
+      async_function_throw->shared()->set_native(false);
     }
   }
 }
@@ -2493,7 +2667,6 @@
                           isolate->factory()->ToBoolean(FLAG), NONE); \
   }
 
-  INITIALIZE_FLAG(FLAG_harmony_species)
   INITIALIZE_FLAG(FLAG_intl_extra)
 
 #undef INITIALIZE_FLAG
@@ -2503,20 +2676,16 @@
 #define EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(id) \
   void Genesis::InitializeGlobal_##id() {}
 
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_unicode_regexps)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_do_expressions)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_for_in)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_iterator_close)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_exec)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_lookbehind)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_named_captures)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_name)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(promise_extra)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(intl_extra)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_explicit_tailcalls)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tailcalls)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_instanceof)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrictive_declarations)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_exponentiation_operator)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_string_padding)
@@ -2524,6 +2693,7 @@
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(icu_case_mapping)
 #endif
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_async_await)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrictive_generators)
 
 void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
                          const char* name, Handle<Symbol> value) {
@@ -2539,19 +2709,6 @@
 }
 
 
-void Genesis::InitializeGlobal_harmony_regexp_subclass() {
-  if (!FLAG_harmony_regexp_subclass) return;
-  InstallPublicSymbol(factory(), native_context(), "match",
-                      factory()->match_symbol());
-  InstallPublicSymbol(factory(), native_context(), "replace",
-                      factory()->replace_symbol());
-  InstallPublicSymbol(factory(), native_context(), "search",
-                      factory()->search_symbol());
-  InstallPublicSymbol(factory(), native_context(), "split",
-                      factory()->split_symbol());
-}
-
-
 void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
   if (!FLAG_harmony_sharedarraybuffer) return;
 
@@ -2690,13 +2847,6 @@
 }
 
 
-void Genesis::InitializeGlobal_harmony_species() {
-  if (!FLAG_harmony_species) return;
-  InstallPublicSymbol(factory(), native_context(), "species",
-                      factory()->species_symbol());
-}
-
-
 Handle<JSFunction> Genesis::InstallInternalArray(Handle<JSObject> target,
                                                  const char* name,
                                                  ElementsKind elements_kind) {
@@ -2763,9 +2913,6 @@
   DCHECK_EQ(builtin_index, Natives::GetIndex("runtime"));
   if (!Bootstrapper::CompileBuiltin(isolate(), builtin_index++)) return false;
 
-  // A thin context is ready at this point.
-  if (context_type == THIN_CONTEXT) return true;
-
   {
     // Builtin function for OpaqueReference -- a JSValue-based object,
     // that keeps its field isolated from JavaScript code. It may store
@@ -2814,6 +2961,14 @@
   native_context()->set_object_function_prototype_map(
       HeapObject::cast(object_function->initial_map()->prototype())->map());
 
+  // Set up the map for Object.create(null) instances.
+  Handle<Map> object_with_null_prototype_map =
+      Map::CopyInitialMap(handle(object_function->initial_map(), isolate()));
+  Map::SetPrototype(object_with_null_prototype_map,
+                    isolate()->factory()->null_value());
+  native_context()->set_object_with_null_prototype_map(
+      *object_with_null_prototype_map);
+
   // Store the map for the %StringPrototype% after the natives has been compiled
   // and the String function has been set up.
   Handle<JSFunction> string_function(native_context()->string_function());
@@ -2825,6 +2980,14 @@
   Handle<JSGlobalObject> global_object =
       handle(native_context()->global_object());
 
+  // Install Global.decodeURI.
+  SimpleInstallFunction(global_object, "decodeURI", Builtins::kGlobalDecodeURI,
+                        1, false);
+
+  // Install Global.decodeURIComponent.
+  SimpleInstallFunction(global_object, "decodeURIComponent",
+                        Builtins::kGlobalDecodeURIComponent, 1, false);
+
   // Install Global.encodeURI.
   SimpleInstallFunction(global_object, "encodeURI", Builtins::kGlobalEncodeURI,
                         1, false);
@@ -2833,6 +2996,14 @@
   SimpleInstallFunction(global_object, "encodeURIComponent",
                         Builtins::kGlobalEncodeURIComponent, 1, false);
 
+  // Install Global.escape.
+  SimpleInstallFunction(global_object, "escape", Builtins::kGlobalEscape, 1,
+                        false);
+
+  // Install Global.unescape.
+  SimpleInstallFunction(global_object, "unescape", Builtins::kGlobalUnescape, 1,
+                        false);
+
   // Install Global.eval.
   {
     Handle<JSFunction> eval =
@@ -3071,27 +3242,18 @@
 
 
 bool Genesis::InstallExperimentalNatives() {
-  static const char* harmony_iterator_close_natives[] = {nullptr};
-  static const char* harmony_species_natives[] = {"native harmony-species.js",
-                                                  nullptr};
   static const char* harmony_explicit_tailcalls_natives[] = {nullptr};
   static const char* harmony_tailcalls_natives[] = {nullptr};
-  static const char* harmony_unicode_regexps_natives[] = {
-      "native harmony-unicode-regexps.js", nullptr};
   static const char* harmony_sharedarraybuffer_natives[] = {
       "native harmony-sharedarraybuffer.js", "native harmony-atomics.js", NULL};
   static const char* harmony_simd_natives[] = {"native harmony-simd.js",
                                                nullptr};
   static const char* harmony_do_expressions_natives[] = {nullptr};
   static const char* harmony_for_in_natives[] = {nullptr};
-  static const char* harmony_regexp_exec_natives[] = {
-      "native harmony-regexp-exec.js", nullptr};
-  static const char* harmony_regexp_subclass_natives[] = {nullptr};
   static const char* harmony_regexp_lookbehind_natives[] = {nullptr};
-  static const char* harmony_instanceof_natives[] = {nullptr};
   static const char* harmony_restrictive_declarations_natives[] = {nullptr};
+  static const char* harmony_regexp_named_captures_natives[] = {nullptr};
   static const char* harmony_regexp_property_natives[] = {nullptr};
-  static const char* harmony_function_name_natives[] = {nullptr};
   static const char* harmony_function_sent_natives[] = {nullptr};
   static const char* promise_extra_natives[] = {"native promise-extra.js",
                                                 nullptr};
@@ -3109,6 +3271,7 @@
 #endif
   static const char* harmony_async_await_natives[] = {
       "native harmony-async-await.js", nullptr};
+  static const char* harmony_restrictive_generators_natives[] = {nullptr};
 
   for (int i = ExperimentalNatives::GetDebuggerCount();
        i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@@ -3295,12 +3458,12 @@
   return v8::internal::ComputePointerHash(extension);
 }
 
-
-Genesis::ExtensionStates::ExtensionStates() : map_(HashMap::PointersMatch, 8) {}
+Genesis::ExtensionStates::ExtensionStates()
+    : map_(base::HashMap::PointersMatch, 8) {}
 
 Genesis::ExtensionTraversalState Genesis::ExtensionStates::get_state(
     RegisteredExtension* extension) {
-  i::HashMap::Entry* entry = map_.Lookup(extension, Hash(extension));
+  base::HashMap::Entry* entry = map_.Lookup(extension, Hash(extension));
   if (entry == NULL) {
     return UNVISITED;
   }
@@ -3437,7 +3600,7 @@
     // Configure the global object.
     Handle<FunctionTemplateInfo> proxy_constructor(
         FunctionTemplateInfo::cast(global_proxy_data->constructor()));
-    if (!proxy_constructor->prototype_template()->IsUndefined()) {
+    if (!proxy_constructor->prototype_template()->IsUndefined(isolate())) {
       Handle<ObjectTemplateInfo> global_object_data(
           ObjectTemplateInfo::cast(proxy_constructor->prototype_template()));
       if (!ConfigureApiObject(global_object, global_object_data)) return false;
@@ -3533,7 +3696,7 @@
     int capacity = properties->Capacity();
     for (int i = 0; i < capacity; i++) {
       Object* raw_key(properties->KeyAt(i));
-      if (properties->IsKey(raw_key)) {
+      if (properties->IsKey(isolate(), raw_key)) {
         DCHECK(raw_key->IsName());
         // If the property is already there we skip it.
         Handle<Name> key(Name::cast(raw_key));
@@ -3544,7 +3707,7 @@
         DCHECK(properties->ValueAt(i)->IsPropertyCell());
         Handle<PropertyCell> cell(PropertyCell::cast(properties->ValueAt(i)));
         Handle<Object> value(cell->value(), isolate());
-        if (value->IsTheHole()) continue;
+        if (value->IsTheHole(isolate())) continue;
         PropertyDetails details = cell->property_details();
         DCHECK_EQ(kData, details.kind());
         JSObject::AddProperty(to, key, value, details.attributes());
@@ -3556,7 +3719,7 @@
     int capacity = properties->Capacity();
     for (int i = 0; i < capacity; i++) {
       Object* raw_key(properties->KeyAt(i));
-      if (properties->IsKey(raw_key)) {
+      if (properties->IsKey(isolate(), raw_key)) {
         DCHECK(raw_key->IsName());
         // If the property is already there we skip it.
         Handle<Name> key(Name::cast(raw_key));
@@ -3567,7 +3730,7 @@
         Handle<Object> value = Handle<Object>(properties->ValueAt(i),
                                               isolate());
         DCHECK(!value->IsCell());
-        DCHECK(!value->IsTheHole());
+        DCHECK(!value->IsTheHole(isolate()));
         PropertyDetails details = properties->DetailsAt(i);
         DCHECK_EQ(kData, details.kind());
         JSObject::AddProperty(to, key, value, details.attributes());
@@ -3644,7 +3807,7 @@
                  MaybeHandle<JSGlobalProxy> maybe_global_proxy,
                  v8::Local<v8::ObjectTemplate> global_proxy_template,
                  v8::ExtensionConfiguration* extensions,
-                 GlobalContextType context_type)
+                 size_t context_snapshot_index, GlobalContextType context_type)
     : isolate_(isolate), active_(isolate->bootstrapper()) {
   NoTrackDoubleFieldsForSerializerScope disable_scope(isolate);
   result_ = Handle<Context>::null();
@@ -3673,7 +3836,8 @@
   // a snapshot. Otherwise we have to build the context from scratch.
   // Also create a context from scratch to expose natives, if required by flag.
   if (!isolate->initialized_from_snapshot() ||
-      !Snapshot::NewContextFromSnapshot(isolate, global_proxy)
+      !Snapshot::NewContextFromSnapshot(isolate, global_proxy,
+                                        context_snapshot_index)
            .ToHandle(&native_context_)) {
     native_context_ = Handle<Context>();
   }
@@ -3715,10 +3879,9 @@
 
     MakeFunctionInstancePrototypeWritable();
 
-    if (context_type != THIN_CONTEXT) {
-      if (!InstallExtraNatives()) return;
-      if (!ConfigureGlobalObjects(global_proxy_template)) return;
-    }
+    if (!InstallExtraNatives()) return;
+    if (!ConfigureGlobalObjects(global_proxy_template)) return;
+
     isolate->counters()->contexts_created_from_scratch()->Increment();
     // Re-initialize the counter because it got incremented during snapshot
     // creation.
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index 5563eea..66a3d8a 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -61,7 +61,7 @@
   DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
 };
 
-enum GlobalContextType { FULL_CONTEXT, THIN_CONTEXT, DEBUG_CONTEXT };
+enum GlobalContextType { FULL_CONTEXT, DEBUG_CONTEXT };
 
 // The Boostrapper is the public interface for creating a JavaScript global
 // context.
@@ -79,7 +79,7 @@
   Handle<Context> CreateEnvironment(
       MaybeHandle<JSGlobalProxy> maybe_global_proxy,
       v8::Local<v8::ObjectTemplate> global_object_template,
-      v8::ExtensionConfiguration* extensions,
+      v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
       GlobalContextType context_type = FULL_CONTEXT);
 
   // Detach the environment from its outer global object.
diff --git a/src/builtins.cc b/src/builtins.cc
index 75f6150..24abb72 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -7,6 +7,7 @@
 #include "src/api-arguments.h"
 #include "src/api-natives.h"
 #include "src/api.h"
+#include "src/base/ieee754.h"
 #include "src/base/once.h"
 #include "src/bootstrapper.h"
 #include "src/code-factory.h"
@@ -18,8 +19,9 @@
 #include "src/ic/handler-compiler.h"
 #include "src/ic/ic.h"
 #include "src/isolate-inl.h"
+#include "src/json-parser.h"
+#include "src/json-stringifier.h"
 #include "src/messages.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/property-descriptor.h"
 #include "src/prototype.h"
 #include "src/string-builder.h"
@@ -32,7 +34,6 @@
 namespace {
 
 // Arguments object passed to C++ builtins.
-template <BuiltinExtraArguments extra_args>
 class BuiltinArguments : public Arguments {
  public:
   BuiltinArguments(int length, Object** arguments)
@@ -63,70 +64,19 @@
   }
 
   template <class S>
-  Handle<S> target();
-  Handle<HeapObject> new_target();
+  Handle<S> target() {
+    return Arguments::at<S>(Arguments::length() - 2);
+  }
+  Handle<HeapObject> new_target() {
+    return Arguments::at<HeapObject>(Arguments::length() - 1);
+  }
 
   // Gets the total number of arguments including the receiver (but
   // excluding extra arguments).
-  int length() const;
+  int length() const { return Arguments::length() - 2; }
 };
 
 
-// Specialize BuiltinArguments for the extra arguments.
-
-template <>
-int BuiltinArguments<BuiltinExtraArguments::kNone>::length() const {
-  return Arguments::length();
-}
-
-template <>
-int BuiltinArguments<BuiltinExtraArguments::kTarget>::length() const {
-  return Arguments::length() - 1;
-}
-
-template <>
-template <class S>
-Handle<S> BuiltinArguments<BuiltinExtraArguments::kTarget>::target() {
-  return Arguments::at<S>(Arguments::length() - 1);
-}
-
-template <>
-int BuiltinArguments<BuiltinExtraArguments::kNewTarget>::length() const {
-  return Arguments::length() - 1;
-}
-
-template <>
-Handle<HeapObject>
-BuiltinArguments<BuiltinExtraArguments::kNewTarget>::new_target() {
-  return Arguments::at<HeapObject>(Arguments::length() - 1);
-}
-
-template <>
-int BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget>::length()
-    const {
-  return Arguments::length() - 2;
-}
-
-template <>
-template <class S>
-Handle<S>
-BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget>::target() {
-  return Arguments::at<S>(Arguments::length() - 2);
-}
-
-template <>
-Handle<HeapObject>
-BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget>::new_target() {
-  return Arguments::at<HeapObject>(Arguments::length() - 1);
-}
-
-
-#define DEF_ARG_TYPE(name, spec) \
-  typedef BuiltinArguments<BuiltinExtraArguments::spec> name##ArgumentsType;
-BUILTIN_LIST_C(DEF_ARG_TYPE)
-#undef DEF_ARG_TYPE
-
-
 // ----------------------------------------------------------------------------
 // Support macro for defining builtins in C++.
 // ----------------------------------------------------------------------------
@@ -141,29 +91,29 @@
 // through the BuiltinArguments object args.
 // TODO(cbruni): add global flag to check whether any tracing events have been
 // enabled.
-#define BUILTIN(name)                                                          \
-  MUST_USE_RESULT static Object* Builtin_Impl_##name(name##ArgumentsType args, \
-                                                     Isolate* isolate);        \
-                                                                               \
-  V8_NOINLINE static Object* Builtin_Impl_Stats_##name(                        \
-      int args_length, Object** args_object, Isolate* isolate) {               \
-    name##ArgumentsType args(args_length, args_object);                        \
-    RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Builtin_##name);   \
-    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"),                      \
-                 "V8.Builtin_" #name);                                         \
-    return Builtin_Impl_##name(args, isolate);                                 \
-  }                                                                            \
-                                                                               \
-  MUST_USE_RESULT static Object* Builtin_##name(                               \
-      int args_length, Object** args_object, Isolate* isolate) {               \
-    if (FLAG_runtime_call_stats) {                                             \
-      return Builtin_Impl_Stats_##name(args_length, args_object, isolate);     \
-    }                                                                          \
-    name##ArgumentsType args(args_length, args_object);                        \
-    return Builtin_Impl_##name(args, isolate);                                 \
-  }                                                                            \
-                                                                               \
-  MUST_USE_RESULT static Object* Builtin_Impl_##name(name##ArgumentsType args, \
+#define BUILTIN(name)                                                        \
+  MUST_USE_RESULT static Object* Builtin_Impl_##name(BuiltinArguments args,  \
+                                                     Isolate* isolate);      \
+                                                                             \
+  V8_NOINLINE static Object* Builtin_Impl_Stats_##name(                      \
+      int args_length, Object** args_object, Isolate* isolate) {             \
+    BuiltinArguments args(args_length, args_object);                         \
+    RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Builtin_##name); \
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"),                    \
+                 "V8.Builtin_" #name);                                       \
+    return Builtin_Impl_##name(args, isolate);                               \
+  }                                                                          \
+                                                                             \
+  MUST_USE_RESULT static Object* Builtin_##name(                             \
+      int args_length, Object** args_object, Isolate* isolate) {             \
+    if (FLAG_runtime_call_stats) {                                           \
+      return Builtin_Impl_Stats_##name(args_length, args_object, isolate);   \
+    }                                                                        \
+    BuiltinArguments args(args_length, args_object);                         \
+    return Builtin_Impl_##name(args, isolate);                               \
+  }                                                                          \
+                                                                             \
+  MUST_USE_RESULT static Object* Builtin_Impl_##name(BuiltinArguments args,  \
                                                      Isolate* isolate)
 
 // ----------------------------------------------------------------------------
@@ -178,8 +128,22 @@
   }                                                                         \
   Handle<Type> name = Handle<Type>::cast(args.receiver())
 
+// Throws a TypeError for {method} if the receiver is not coercible to Object,
+// or converts the receiver to a String otherwise and assigns it to a new var
+// with the given {name}.
+#define TO_THIS_STRING(name, method)                                          \
+  if (args.receiver()->IsNull(isolate) ||                                     \
+      args.receiver()->IsUndefined(isolate)) {                                \
+    THROW_NEW_ERROR_RETURN_FAILURE(                                           \
+        isolate,                                                              \
+        NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,               \
+                     isolate->factory()->NewStringFromAsciiChecked(method))); \
+  }                                                                           \
+  Handle<String> name;                                                        \
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(                                         \
+      isolate, name, Object::ToString(isolate, args.receiver()))
 
-inline bool ClampedToInteger(Object* object, int* out) {
+inline bool ClampedToInteger(Isolate* isolate, Object* object, int* out) {
   // This is an extended version of ECMA-262 7.1.11 handling signed values
   // Try to convert object to a number and clamp values to [kMinInt, kMaxInt]
   if (object->IsSmi()) {
@@ -197,11 +161,11 @@
       *out = static_cast<int>(value);
     }
     return true;
-  } else if (object->IsUndefined() || object->IsNull()) {
+  } else if (object->IsUndefined(isolate) || object->IsNull(isolate)) {
     *out = 0;
     return true;
   } else if (object->IsBoolean()) {
-    *out = object->IsTrue();
+    *out = object->IsTrue(isolate);
     return true;
   }
   return false;
@@ -257,8 +221,7 @@
 
 inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver* receiver) {
   DisallowHeapAllocation no_gc;
-  PrototypeIterator iter(isolate, receiver,
-                         PrototypeIterator::START_AT_RECEIVER);
+  PrototypeIterator iter(isolate, receiver, kStartAtReceiver);
   for (; !iter.IsAtEnd(); iter.Advance()) {
     if (iter.GetCurrent()->IsJSProxy()) return false;
     JSObject* current = iter.GetCurrent<JSObject>();
@@ -271,7 +234,7 @@
 MUST_USE_RESULT
 inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
                                                   Handle<Object> receiver,
-                                                  Arguments* args,
+                                                  BuiltinArguments* args,
                                                   int first_added_arg) {
   if (!receiver->IsJSArray()) return false;
   Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@@ -318,25 +281,18 @@
   return true;
 }
 
-
-MUST_USE_RESULT static Object* CallJsIntrinsic(
-    Isolate* isolate, Handle<JSFunction> function,
-    BuiltinArguments<BuiltinExtraArguments::kNone> args) {
+MUST_USE_RESULT static Object* CallJsIntrinsic(Isolate* isolate,
+                                               Handle<JSFunction> function,
+                                               BuiltinArguments args) {
   HandleScope handleScope(isolate);
   int argc = args.length() - 1;
   ScopedVector<Handle<Object> > argv(argc);
   for (int i = 0; i < argc; ++i) {
     argv[i] = args.at<Object>(i + 1);
   }
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      Execution::Call(isolate,
-                      function,
-                      args.receiver(),
-                      argc,
-                      argv.start()));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(
+      isolate,
+      Execution::Call(isolate, function, args.receiver(), argc, argv.start()));
 }
 
 
@@ -412,7 +368,7 @@
                        &call_runtime);
 
   assembler->Bind(&if_iskeyunique);
-  assembler->TryLookupProperty(object, map, instance_type, key, &return_true,
+  assembler->TryHasOwnProperty(object, map, instance_type, key, &return_true,
                                &return_false, &call_runtime);
 
   assembler->Bind(&keyisindex);
@@ -432,8 +388,7 @@
 
 namespace {
 
-Object* DoArrayPush(Isolate* isolate,
-                    BuiltinArguments<BuiltinExtraArguments::kNone> args) {
+Object* DoArrayPush(Isolate* isolate, BuiltinArguments args) {
   HandleScope scope(isolate);
   Handle<Object> receiver = args.receiver();
   if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1)) {
@@ -467,8 +422,8 @@
   DCHECK_EQ(2, args.length());
   Arguments* incoming = reinterpret_cast<Arguments*>(args[0]);
   // Rewrap the arguments as builtins arguments.
-  BuiltinArguments<BuiltinExtraArguments::kNone> caller_args(
-      incoming->length() + 1, incoming->arguments() + 1);
+  BuiltinArguments caller_args(incoming->length() + 3,
+                               incoming->arguments() + 1);
   return DoArrayPush(isolate, caller_args);
 }
 
@@ -588,16 +543,16 @@
   relative_end = len;
   if (argument_count > 0) {
     DisallowHeapAllocation no_gc;
-    if (!ClampedToInteger(args[1], &relative_start)) {
+    if (!ClampedToInteger(isolate, args[1], &relative_start)) {
       AllowHeapAllocation allow_allocation;
       return CallJsIntrinsic(isolate, isolate->array_slice(), args);
     }
     if (argument_count > 1) {
       Object* end_arg = args[2];
       // slice handles the end_arg specially
-      if (end_arg->IsUndefined()) {
+      if (end_arg->IsUndefined(isolate)) {
         relative_end = len;
-      } else if (!ClampedToInteger(end_arg, &relative_end)) {
+      } else if (!ClampedToInteger(isolate, end_arg, &relative_end)) {
         AllowHeapAllocation allow_allocation;
         return CallJsIntrinsic(isolate, isolate->array_slice(), args);
       }
@@ -635,7 +590,7 @@
   int relative_start = 0;
   if (argument_count > 0) {
     DisallowHeapAllocation no_gc;
-    if (!ClampedToInteger(args[1], &relative_start)) {
+    if (!ClampedToInteger(isolate, args[1], &relative_start)) {
       AllowHeapAllocation allow_allocation;
       return CallJsIntrinsic(isolate, isolate->array_splice(), args);
     }
@@ -657,7 +612,7 @@
     int delete_count = 0;
     DisallowHeapAllocation no_gc;
     if (argument_count > 1) {
-      if (!ClampedToInteger(args[2], &delete_count)) {
+      if (!ClampedToInteger(isolate, args[2], &delete_count)) {
         AllowHeapAllocation allow_allocation;
         return CallJsIntrinsic(isolate, isolate->array_splice(), args);
       }
@@ -810,7 +765,7 @@
     FOR_WITH_HANDLE_SCOPE(
         isolate_, uint32_t, i = 0, i, i < current_length, i++, {
           Handle<Object> element(current_storage->get(i), isolate_);
-          if (!element->IsTheHole()) {
+          if (!element->IsTheHole(isolate_)) {
             // The object holding this backing store has just been allocated, so
             // it cannot yet be used as a prototype.
             Handle<SeededNumberDictionary> new_storage =
@@ -856,6 +811,7 @@
 
 
 uint32_t EstimateElementCount(Handle<JSArray> array) {
+  DisallowHeapAllocation no_gc;
   uint32_t length = static_cast<uint32_t>(array->length()->Number());
   int element_count = 0;
   switch (array->GetElementsKind()) {
@@ -867,9 +823,10 @@
       // a 32-bit signed integer.
       DCHECK(static_cast<int32_t>(FixedArray::kMaxLength) >= 0);
       int fast_length = static_cast<int>(length);
-      Handle<FixedArray> elements(FixedArray::cast(array->elements()));
+      Isolate* isolate = array->GetIsolate();
+      FixedArray* elements = FixedArray::cast(array->elements());
       for (int i = 0; i < fast_length; i++) {
-        if (!elements->get(i)->IsTheHole()) element_count++;
+        if (!elements->get(i)->IsTheHole(isolate)) element_count++;
       }
       break;
     }
@@ -883,20 +840,20 @@
         DCHECK(FixedArray::cast(array->elements())->length() == 0);
         break;
       }
-      Handle<FixedDoubleArray> elements(
-          FixedDoubleArray::cast(array->elements()));
+      FixedDoubleArray* elements = FixedDoubleArray::cast(array->elements());
       for (int i = 0; i < fast_length; i++) {
         if (!elements->is_the_hole(i)) element_count++;
       }
       break;
     }
     case DICTIONARY_ELEMENTS: {
-      Handle<SeededNumberDictionary> dictionary(
-          SeededNumberDictionary::cast(array->elements()));
+      SeededNumberDictionary* dictionary =
+          SeededNumberDictionary::cast(array->elements());
+      Isolate* isolate = dictionary->GetIsolate();
       int capacity = dictionary->Capacity();
       for (int i = 0; i < capacity; i++) {
-        Handle<Object> key(dictionary->KeyAt(i), array->GetIsolate());
-        if (dictionary->IsKey(*key)) {
+        Object* key = dictionary->KeyAt(i);
+        if (dictionary->IsKey(isolate, key)) {
           element_count++;
         }
       }
@@ -945,7 +902,7 @@
       uint32_t length = static_cast<uint32_t>(elements->length());
       if (range < length) length = range;
       for (uint32_t i = 0; i < length; i++) {
-        if (!elements->get(i)->IsTheHole()) {
+        if (!elements->get(i)->IsTheHole(isolate)) {
           indices->Add(i);
         }
       }
@@ -973,13 +930,9 @@
       SeededNumberDictionary* dict =
           SeededNumberDictionary::cast(object->elements());
       uint32_t capacity = dict->Capacity();
-      Heap* heap = isolate->heap();
-      Object* undefined = heap->undefined_value();
-      Object* the_hole = heap->the_hole_value();
       FOR_WITH_HANDLE_SCOPE(isolate, uint32_t, j = 0, j, j < capacity, j++, {
         Object* k = dict->KeyAt(j);
-        if (k == undefined) continue;
-        if (k == the_hole) continue;
+        if (!dict->IsKey(isolate, k)) continue;
         DCHECK(k->IsNumber());
         uint32_t index = static_cast<uint32_t>(k->Number());
         if (index < range) {
@@ -1088,12 +1041,8 @@
     length = static_cast<uint32_t>(array->length()->Number());
   } else {
     Handle<Object> val;
-    Handle<Object> key = isolate->factory()->length_string();
     ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-        isolate, val, Runtime::GetObjectProperty(isolate, receiver, key),
-        false);
-    ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, val,
-                                     Object::ToLength(isolate, val), false);
+        isolate, val, Object::GetLengthFromArrayLike(isolate, receiver), false);
     // TODO(caitp): Support larger element indexes (up to 2^53-1).
     if (!val->ToUint32(&length)) {
       length = 0;
@@ -1119,7 +1068,7 @@
       DCHECK(fast_length <= elements->length());
       FOR_WITH_HANDLE_SCOPE(isolate, int, j = 0, j, j < fast_length, j++, {
         Handle<Object> element_value(elements->get(j), isolate);
-        if (!element_value->IsTheHole()) {
+        if (!element_value->IsTheHole(isolate)) {
           if (!visitor->visit(j, element_value)) return false;
         } else {
           Maybe<bool> maybe = JSReceiver::HasElement(array, j);
@@ -1233,13 +1182,12 @@
     MaybeHandle<Object> maybeValue =
         i::Runtime::GetObjectProperty(isolate, obj, key);
     if (!maybeValue.ToHandle(&value)) return Nothing<bool>();
-    if (!value->IsUndefined()) return Just(value->BooleanValue());
+    if (!value->IsUndefined(isolate)) return Just(value->BooleanValue());
   }
   return Object::IsArray(obj);
 }
 
-
-Object* Slow_ArrayConcat(Arguments* args, Handle<Object> species,
+Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
                          Isolate* isolate) {
   int argument_count = args->length();
 
@@ -1436,7 +1384,8 @@
   return false;
 }
 
-MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate, Arguments* args) {
+MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate,
+                                      BuiltinArguments* args) {
   if (!isolate->IsIsConcatSpreadableLookupChainIntact()) {
     return MaybeHandle<JSArray>();
   }
@@ -1471,7 +1420,8 @@
       result_len += Smi::cast(array->length())->value();
       DCHECK(result_len >= 0);
       // Throw an Error if we overflow the FixedArray limits
-      if (FixedArray::kMaxLength < result_len) {
+      if (FixedDoubleArray::kMaxLength < result_len ||
+          FixedArray::kMaxLength < result_len) {
         AllowHeapAllocation gc;
         THROW_NEW_ERROR(isolate,
                         NewRangeError(MessageTemplate::kInvalidArrayLength),
@@ -1491,7 +1441,7 @@
 
   Handle<Object> receiver = args.receiver();
   // TODO(bmeurer): Do we really care about the exact exception message here?
-  if (receiver->IsNull() || receiver->IsUndefined()) {
+  if (receiver->IsNull(isolate) || receiver->IsUndefined(isolate)) {
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
                               isolate->factory()->NewStringFromAsciiChecked(
@@ -1636,8 +1586,9 @@
     // 4b ii. Let keys be ? from.[[OwnPropertyKeys]]().
     Handle<FixedArray> keys;
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, keys,
-        JSReceiver::GetKeys(from, OWN_ONLY, ALL_PROPERTIES, KEEP_NUMBERS));
+        isolate, keys, KeyAccumulator::GetKeys(
+                           from, KeyCollectionMode::kOwnOnly, ALL_PROPERTIES,
+                           GetKeysConversion::kKeepNumbers));
     // 4c. Repeat for each element nextKey of keys in List order,
     for (int j = 0; j < keys->length(); ++j) {
       Handle<Object> next_key(keys->get(j), isolate);
@@ -1667,10 +1618,12 @@
 
 
 // ES6 section 19.1.2.2 Object.create ( O [ , Properties ] )
+// TODO(verwaest): Support the common cases with precached map directly in
+// an Object.create stub.
 BUILTIN(ObjectCreate) {
   HandleScope scope(isolate);
   Handle<Object> prototype = args.atOrUndefined(isolate, 1);
-  if (!prototype->IsNull() && !prototype->IsJSReceiver()) {
+  if (!prototype->IsNull(isolate) && !prototype->IsJSReceiver()) {
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, prototype));
   }
@@ -1682,7 +1635,26 @@
   Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
                   isolate);
   if (map->prototype() != *prototype) {
-    map = Map::TransitionToPrototype(map, prototype, FAST_PROTOTYPE);
+    if (prototype->IsNull(isolate)) {
+      map = isolate->object_with_null_prototype_map();
+    } else if (prototype->IsJSObject()) {
+      Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
+      if (!js_prototype->map()->is_prototype_map()) {
+        JSObject::OptimizeAsPrototype(js_prototype, FAST_PROTOTYPE);
+      }
+      Handle<PrototypeInfo> info =
+          Map::GetOrCreatePrototypeInfo(js_prototype, isolate);
+      // TODO(verwaest): Use inobject slack tracking for this map.
+      if (info->HasObjectCreateMap()) {
+        map = handle(info->ObjectCreateMap(), isolate);
+      } else {
+        map = Map::CopyInitialMap(map);
+        Map::SetPrototype(map, prototype, FAST_PROTOTYPE);
+        PrototypeInfo::SetObjectCreateMap(info, map);
+      }
+    } else {
+      map = Map::TransitionToPrototype(map, prototype, REGULAR_PROTOTYPE);
+    }
   }
 
   // Actually allocate the object.
@@ -1690,7 +1662,7 @@
 
   // Define the properties if properties was specified and is not undefined.
   Handle<Object> properties = args.atOrUndefined(isolate, 2);
-  if (!properties->IsUndefined()) {
+  if (!properties->IsUndefined(isolate)) {
     RETURN_FAILURE_ON_EXCEPTION(
         isolate, JSReceiver::DefineProperties(isolate, object, properties));
   }
@@ -1705,11 +1677,8 @@
   Handle<Object> target = args.at<Object>(1);
   Handle<Object> properties = args.at<Object>(2);
 
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      JSReceiver::DefineProperties(isolate, target, properties));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, JSReceiver::DefineProperties(isolate, target, properties));
 }
 
 // ES6 section 19.1.2.4 Object.defineProperty
@@ -1760,6 +1729,9 @@
   Maybe<bool> success = JSReceiver::DefineOwnProperty(
       isolate, receiver, name, &desc, Object::DONT_THROW);
   MAYBE_RETURN(success, isolate->heap()->exception());
+  if (!success.FromJust()) {
+    isolate->CountUsage(v8::Isolate::kDefineGetterOrSetterWouldThrow);
+  }
   // 6. Return undefined.
   return isolate->heap()->undefined_value();
 }
@@ -1871,11 +1843,8 @@
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, receiver, Object::ToObject(isolate, object));
 
-  Handle<Object> prototype;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, prototype, JSReceiver::GetPrototype(isolate, receiver));
-
-  return *prototype;
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           JSReceiver::GetPrototype(isolate, receiver));
 }
 
 
@@ -1905,8 +1874,7 @@
 
 namespace {
 
-Object* GetOwnPropertyKeys(Isolate* isolate,
-                           BuiltinArguments<BuiltinExtraArguments::kNone> args,
+Object* GetOwnPropertyKeys(Isolate* isolate, BuiltinArguments args,
                            PropertyFilter filter) {
   HandleScope scope(isolate);
   Handle<Object> object = args.atOrUndefined(isolate, 1);
@@ -1916,7 +1884,8 @@
   Handle<FixedArray> keys;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, keys,
-      JSReceiver::GetKeys(receiver, OWN_ONLY, filter, CONVERT_TO_STRING));
+      KeyAccumulator::GetKeys(receiver, KeyCollectionMode::kOwnOnly, filter,
+                              GetKeysConversion::kConvertToString));
   return *isolate->factory()->NewJSArrayWithElements(keys);
 }
 
@@ -2012,8 +1981,9 @@
   } else {
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
         isolate, keys,
-        JSReceiver::GetKeys(receiver, OWN_ONLY, ENUMERABLE_STRINGS,
-                            CONVERT_TO_STRING));
+        KeyAccumulator::GetKeys(receiver, KeyCollectionMode::kOwnOnly,
+                                ENUMERABLE_STRINGS,
+                                GetKeysConversion::kConvertToString));
   }
   return *isolate->factory()->NewJSArrayWithElements(keys, FAST_ELEMENTS);
 }
@@ -2055,8 +2025,9 @@
 
   Handle<FixedArray> keys;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, keys, JSReceiver::GetKeys(receiver, OWN_ONLY, ALL_PROPERTIES,
-                                         CONVERT_TO_STRING));
+      isolate, keys, KeyAccumulator::GetKeys(
+                         receiver, KeyCollectionMode::kOwnOnly, ALL_PROPERTIES,
+                         GetKeysConversion::kConvertToString));
 
   Handle<JSObject> descriptors =
       isolate->factory()->NewJSObject(isolate->object_function());
@@ -2107,6 +2078,29 @@
   return *object;
 }
 
+// ES6 section 18.2.6.2 decodeURI (encodedURI)
+BUILTIN(GlobalDecodeURI) {
+  HandleScope scope(isolate);
+  Handle<String> encoded_uri;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, encoded_uri,
+      Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+
+  RETURN_RESULT_OR_FAILURE(isolate, Uri::DecodeUri(isolate, encoded_uri));
+}
+
+// ES6 section 18.2.6.3 decodeURIComponent (encodedURIComponent)
+BUILTIN(GlobalDecodeURIComponent) {
+  HandleScope scope(isolate);
+  Handle<String> encoded_uri_component;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, encoded_uri_component,
+      Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+
+  RETURN_RESULT_OR_FAILURE(
+      isolate, Uri::DecodeUriComponent(isolate, encoded_uri_component));
+}
+
 // ES6 section 18.2.6.4 encodeURI (uri)
 BUILTIN(GlobalEncodeURI) {
   HandleScope scope(isolate);
@@ -2114,25 +2108,48 @@
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, uri, Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
 
-  return Uri::EncodeUri(isolate, uri);
+  RETURN_RESULT_OR_FAILURE(isolate, Uri::EncodeUri(isolate, uri));
 }
 
 // ES6 section 18.2.6.5 encodeURIComponenet (uriComponent)
 BUILTIN(GlobalEncodeURIComponent) {
   HandleScope scope(isolate);
-  Handle<String> uriComponent;
+  Handle<String> uri_component;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, uriComponent,
+      isolate, uri_component,
       Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
 
-  return Uri::EncodeUriComponent(isolate, uriComponent);
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           Uri::EncodeUriComponent(isolate, uri_component));
+}
+
+// ES6 section B.2.1.1 escape (string)
+BUILTIN(GlobalEscape) {
+  HandleScope scope(isolate);
+  Handle<String> string;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, string,
+      Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+
+  RETURN_RESULT_OR_FAILURE(isolate, Uri::Escape(isolate, string));
+}
+
+// ES6 section B.2.1.2 unescape (string)
+BUILTIN(GlobalUnescape) {
+  HandleScope scope(isolate);
+  Handle<String> string;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, string,
+      Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+
+  RETURN_RESULT_OR_FAILURE(isolate, Uri::Unescape(isolate, string));
 }
 
 namespace {
 
 bool CodeGenerationFromStringsAllowed(Isolate* isolate,
                                       Handle<Context> context) {
-  DCHECK(context->allow_code_gen_from_strings()->IsFalse());
+  DCHECK(context->allow_code_gen_from_strings()->IsFalse(isolate));
   // Check with callback if set.
   AllowCodeGenerationFromStringsCallback callback =
       isolate->allow_code_gen_callback();
@@ -2155,7 +2172,7 @@
 
   // Check if native context allows code generation from
   // strings. Throw an exception if it doesn't.
-  if (native_context->allow_code_gen_from_strings()->IsFalse() &&
+  if (native_context->allow_code_gen_from_strings()->IsFalse(isolate) &&
       !CodeGenerationFromStringsAllowed(isolate, native_context)) {
     Handle<Object> error_message =
         native_context->ErrorMessageForCodeGenerationFromStrings();
@@ -2188,13 +2205,36 @@
       isolate, function,
       CompileString(handle(target->native_context(), isolate),
                     Handle<String>::cast(x), NO_PARSE_RESTRICTION));
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
+  RETURN_RESULT_OR_FAILURE(
+      isolate,
       Execution::Call(isolate, function, target_global_proxy, 0, nullptr));
-  return *result;
 }
 
+// ES6 section 24.3.1 JSON.parse.
+BUILTIN(JsonParse) {
+  HandleScope scope(isolate);
+  Handle<Object> source = args.atOrUndefined(isolate, 1);
+  Handle<Object> reviver = args.atOrUndefined(isolate, 2);
+  Handle<String> string;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, string,
+                                     Object::ToString(isolate, source));
+  string = String::Flatten(string);
+  RETURN_RESULT_OR_FAILURE(
+      isolate, string->IsSeqOneByteString()
+                   ? JsonParser<true>::Parse(isolate, string, reviver)
+                   : JsonParser<false>::Parse(isolate, string, reviver));
+}
+
+// ES6 section 24.3.2 JSON.stringify.
+BUILTIN(JsonStringify) {
+  HandleScope scope(isolate);
+  JsonStringifier stringifier(isolate);
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+  Handle<Object> replacer = args.atOrUndefined(isolate, 2);
+  Handle<Object> indent = args.atOrUndefined(isolate, 3);
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           stringifier.Stringify(object, replacer, indent));
+}
 
 // -----------------------------------------------------------------------------
 // ES6 section 20.2.2 Function Properties of the Math Object
@@ -2219,14 +2259,42 @@
   return *isolate->factory()->NewHeapNumber(std::asin(x->Number()));
 }
 
-
 // ES6 section 20.2.2.6 Math.atan ( x )
-BUILTIN(MathAtan) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  Handle<Object> x = args.at<Object>(1);
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
-  return *isolate->factory()->NewHeapNumber(std::atan(x->Number()));
+void Builtins::Generate_MathAtan(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Atan(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.8 Math.atan2 ( y, x )
+void Builtins::Generate_MathAtan2(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* y = assembler->Parameter(1);
+  Node* x = assembler->Parameter(2);
+  Node* context = assembler->Parameter(5);
+  Node* y_value = assembler->TruncateTaggedToFloat64(context, y);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Atan2(y_value, x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.7 Math.atanh ( x )
+void Builtins::Generate_MathAtanh(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Atanh(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
 }
 
 namespace {
@@ -2297,6 +2365,18 @@
   Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Ceil);
 }
 
+// ES6 section 20.2.2.9 Math.cbrt ( x )
+void Builtins::Generate_MathCbrt(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Cbrt(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
 // ES6 section 20.2.2.11 Math.clz32 ( x )
 void Builtins::Generate_MathClz32(CodeStubAssembler* assembler) {
   typedef CodeStubAssembler::Label Label;
@@ -2365,6 +2445,30 @@
   }
 }
 
+// ES6 section 20.2.2.12 Math.cos ( x )
+void Builtins::Generate_MathCos(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Cos(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.14 Math.exp ( x )
+void Builtins::Generate_MathExp(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Exp(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
 // ES6 section 20.2.2.16 Math.floor ( x )
 void Builtins::Generate_MathFloor(CodeStubAssembler* assembler) {
   Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Floor);
@@ -2392,11 +2496,83 @@
   return *isolate->factory()->NewNumberFromInt(product);
 }
 
+// ES6 section 20.2.2.20 Math.log ( x )
+void Builtins::Generate_MathLog(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Log(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.21 Math.log1p ( x )
+void Builtins::Generate_MathLog1p(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Log1p(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.23 Math.log2 ( x )
+void Builtins::Generate_MathLog2(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Log2(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.22 Math.log10 ( x )
+void Builtins::Generate_MathLog10(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Log10(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.15 Math.expm1 ( x )
+void Builtins::Generate_MathExpm1(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Expm1(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
 // ES6 section 20.2.2.28 Math.round ( x )
 void Builtins::Generate_MathRound(CodeStubAssembler* assembler) {
   Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Round);
 }
 
+// ES6 section 20.2.2.30 Math.sin ( x )
+void Builtins::Generate_MathSin(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Sin(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
 // ES6 section 20.2.2.32 Math.sqrt ( x )
 void Builtins::Generate_MathSqrt(CodeStubAssembler* assembler) {
   using compiler::Node;
@@ -2409,6 +2585,18 @@
   assembler->Return(result);
 }
 
+// ES6 section 20.2.2.33 Math.tan ( x )
+void Builtins::Generate_MathTan(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Tan(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
 // ES6 section 20.2.2.35 Math.trunc ( x )
 void Builtins::Generate_MathTrunc(CodeStubAssembler* assembler) {
   Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Trunc);
@@ -2612,12 +2800,9 @@
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
                                      Object::ToName(isolate, key));
 
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Object::GetPropertyOrElement(
-                           receiver, name, Handle<JSReceiver>::cast(target)));
-
-  return *result;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, Object::GetPropertyOrElement(receiver, name,
+                                            Handle<JSReceiver>::cast(target)));
 }
 
 
@@ -2660,11 +2845,9 @@
                               isolate->factory()->NewStringFromAsciiChecked(
                                   "Reflect.getPrototypeOf")));
   }
-  Handle<Object> prototype;
   Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(target);
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, prototype, JSReceiver::GetPrototype(isolate, receiver));
-  return *prototype;
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           JSReceiver::GetPrototype(isolate, receiver));
 }
 
 
@@ -2729,8 +2912,9 @@
   Handle<FixedArray> keys;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, keys,
-      JSReceiver::GetKeys(Handle<JSReceiver>::cast(target), OWN_ONLY,
-                          ALL_PROPERTIES, CONVERT_TO_STRING));
+      KeyAccumulator::GetKeys(Handle<JSReceiver>::cast(target),
+                              KeyCollectionMode::kOwnOnly, ALL_PROPERTIES,
+                              GetKeysConversion::kConvertToString));
   return *isolate->factory()->NewJSArrayWithElements(keys);
 }
 
@@ -2797,7 +2981,7 @@
                                   "Reflect.setPrototypeOf")));
   }
 
-  if (!proto->IsJSReceiver() && !proto->IsNull()) {
+  if (!proto->IsJSReceiver() && !proto->IsNull(isolate)) {
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, proto));
   }
@@ -2905,7 +3089,7 @@
 
   // 4. Let numberOffset be ? ToNumber(byteOffset).
   Handle<Object> number_offset;
-  if (byte_offset->IsUndefined()) {
+  if (byte_offset->IsUndefined(isolate)) {
     // We intentionally violate the specification at this point to allow
     // for new DataView(buffer) invocations to be equivalent to the full
     // new DataView(buffer, 0) invocation.
@@ -2940,7 +3124,7 @@
   }
 
   Handle<Object> view_byte_length;
-  if (byte_length->IsUndefined()) {
+  if (byte_length->IsUndefined(isolate)) {
     // 10. If byteLength is undefined, then
     //       a. Let viewByteLength be bufferByteLength - offset.
     view_byte_length =
@@ -2982,6 +3166,119 @@
   return *result;
 }
 
+// ES6 section 24.2.4.1 get DataView.prototype.buffer
+BUILTIN(DataViewPrototypeGetBuffer) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDataView, data_view, "get DataView.prototype.buffer");
+  return data_view->buffer();
+}
+
+// ES6 section 24.2.4.2 get DataView.prototype.byteLength
+BUILTIN(DataViewPrototypeGetByteLength) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDataView, data_view, "get DataView.prototype.byteLength");
+  // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
+  // here if the JSArrayBuffer of the {data_view} was neutered.
+  return data_view->byte_length();
+}
+
+// ES6 section 24.2.4.3 get DataView.prototype.byteOffset
+BUILTIN(DataViewPrototypeGetByteOffset) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDataView, data_view, "get DataView.prototype.byteOffset");
+  // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
+  // here if the JSArrayBuffer of the {data_view} was neutered.
+  return data_view->byte_offset();
+}
+
+// -----------------------------------------------------------------------------
+// ES6 section 22.2 TypedArray Objects
+
+// ES6 section 22.2.3.1 get %TypedArray%.prototype.buffer
+BUILTIN(TypedArrayPrototypeBuffer) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSTypedArray, typed_array, "get TypedArray.prototype.buffer");
+  return *typed_array->GetBuffer();
+}
+
+namespace {
+
+void Generate_TypedArrayProtoypeGetter(CodeStubAssembler* assembler,
+                                       const char* method_name,
+                                       int object_offset) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* context = assembler->Parameter(3);
+
+  // Check if the {receiver} is actually a JSTypedArray.
+  Label if_receiverisincompatible(assembler, Label::kDeferred);
+  assembler->GotoIf(assembler->WordIsSmi(receiver), &if_receiverisincompatible);
+  Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
+  assembler->GotoUnless(
+      assembler->Word32Equal(receiver_instance_type,
+                             assembler->Int32Constant(JS_TYPED_ARRAY_TYPE)),
+      &if_receiverisincompatible);
+
+  // Check if the {receiver}'s JSArrayBuffer was neutered.
+  Node* receiver_buffer =
+      assembler->LoadObjectField(receiver, JSTypedArray::kBufferOffset);
+  Node* receiver_buffer_bit_field = assembler->LoadObjectField(
+      receiver_buffer, JSArrayBuffer::kBitFieldOffset, MachineType::Uint32());
+  Label if_receiverisneutered(assembler, Label::kDeferred);
+  assembler->GotoUnless(
+      assembler->Word32Equal(
+          assembler->Word32And(
+              receiver_buffer_bit_field,
+              assembler->Int32Constant(JSArrayBuffer::WasNeutered::kMask)),
+          assembler->Int32Constant(0)),
+      &if_receiverisneutered);
+  assembler->Return(assembler->LoadObjectField(receiver, object_offset));
+
+  assembler->Bind(&if_receiverisneutered);
+  {
+    // The {receiver}s buffer was neutered, default to zero.
+    assembler->Return(assembler->SmiConstant(0));
+  }
+
+  assembler->Bind(&if_receiverisincompatible);
+  {
+    // The {receiver} is not a valid JSGeneratorObject.
+    Node* result = assembler->CallRuntime(
+        Runtime::kThrowIncompatibleMethodReceiver, context,
+        assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
+            method_name, TENURED)),
+        receiver);
+    assembler->Return(result);  // Never reached.
+  }
+}
+
+}  // namespace
+
+// ES6 section 22.2.3.2 get %TypedArray%.prototype.byteLength
+void Builtins::Generate_TypedArrayPrototypeByteLength(
+    CodeStubAssembler* assembler) {
+  Generate_TypedArrayProtoypeGetter(assembler,
+                                    "get TypedArray.prototype.byteLength",
+                                    JSTypedArray::kByteLengthOffset);
+}
+
+// ES6 section 22.2.3.3 get %TypedArray%.prototype.byteOffset
+void Builtins::Generate_TypedArrayPrototypeByteOffset(
+    CodeStubAssembler* assembler) {
+  Generate_TypedArrayProtoypeGetter(assembler,
+                                    "get TypedArray.prototype.byteOffset",
+                                    JSTypedArray::kByteOffsetOffset);
+}
+
+// ES6 section 22.2.3.18 get %TypedArray%.prototype.length
+void Builtins::Generate_TypedArrayPrototypeLength(
+    CodeStubAssembler* assembler) {
+  Generate_TypedArrayProtoypeGetter(assembler,
+                                    "get TypedArray.prototype.length",
+                                    JSTypedArray::kLengthOffset);
+}
 
 // -----------------------------------------------------------------------------
 // ES6 section 20.3 Date Objects
@@ -3100,11 +3397,9 @@
   String::FlatContent str_content = str->GetFlatContent();
   bool result;
   if (str_content.IsOneByte()) {
-    result = DateParser::Parse(str_content.ToOneByteVector(), *tmp,
-                               isolate->unicode_cache());
+    result = DateParser::Parse(isolate, str_content.ToOneByteVector(), *tmp);
   } else {
-    result = DateParser::Parse(str_content.ToUC16Vector(), *tmp,
-                               isolate->unicode_cache());
+    result = DateParser::Parse(isolate, str_content.ToUC16Vector(), *tmp);
   }
   if (!result) return std::numeric_limits<double>::quiet_NaN();
   double const day = MakeDay(tmp->get(0)->Number(), tmp->get(1)->Number(),
@@ -3112,7 +3407,7 @@
   double const time = MakeTime(tmp->get(3)->Number(), tmp->get(4)->Number(),
                                tmp->get(5)->Number(), tmp->get(6)->Number());
   double date = MakeDate(day, time);
-  if (tmp->get(7)->IsNull()) {
+  if (tmp->get(7)->IsNull(isolate)) {
     if (!std::isnan(date)) {
       date = isolate->date_cache()->ToUTC(static_cast<int64_t>(date));
     }
@@ -3183,11 +3478,8 @@
   double const time_val = JSDate::CurrentTimeValue(isolate);
   char buffer[128];
   ToDateString(time_val, ArrayVector(buffer), isolate->date_cache());
-  Handle<String> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
 }
 
 
@@ -3269,10 +3561,7 @@
       time_val = std::numeric_limits<double>::quiet_NaN();
     }
   }
-  Handle<JSDate> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     JSDate::New(target, new_target, time_val));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, JSDate::New(target, new_target, time_val));
 }
 
 
@@ -3767,11 +4056,8 @@
   char buffer[128];
   ToDateString(date->value()->Number(), ArrayVector(buffer),
                isolate->date_cache(), kDateOnly);
-  Handle<String> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
 }
 
 
@@ -3810,11 +4096,8 @@
   char buffer[128];
   ToDateString(date->value()->Number(), ArrayVector(buffer),
                isolate->date_cache());
-  Handle<String> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
 }
 
 
@@ -3825,11 +4108,8 @@
   char buffer[128];
   ToDateString(date->value()->Number(), ArrayVector(buffer),
                isolate->date_cache(), kTimeOnly);
-  Handle<String> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
 }
 
 
@@ -3867,10 +4147,7 @@
   DCHECK_EQ(2, args.length());
   CHECK_RECEIVER(JSReceiver, receiver, "Date.prototype [ @@toPrimitive ]");
   Handle<Object> hint = args.at<Object>(1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     JSDate::ToPrimitive(receiver, hint));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, JSDate::ToPrimitive(receiver, hint));
 }
 
 
@@ -3914,6 +4191,33 @@
   return SetLocalDateValue(date, time_val);
 }
 
+// ES6 section 20.3.4.37 Date.prototype.toJSON ( key )
+BUILTIN(DatePrototypeToJson) {
+  HandleScope scope(isolate);
+  Handle<Object> receiver = args.atOrUndefined(isolate, 0);
+  Handle<JSReceiver> receiver_obj;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver_obj,
+                                     Object::ToObject(isolate, receiver));
+  Handle<Object> primitive;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, primitive,
+      Object::ToPrimitive(receiver_obj, ToPrimitiveHint::kNumber));
+  if (primitive->IsNumber() && !std::isfinite(primitive->Number())) {
+    return isolate->heap()->null_value();
+  } else {
+    Handle<String> name =
+        isolate->factory()->NewStringFromAsciiChecked("toISOString");
+    Handle<Object> function;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, function,
+                                       Object::GetProperty(receiver_obj, name));
+    if (!function->IsCallable()) {
+      THROW_NEW_ERROR_RETURN_FAILURE(
+          isolate, NewTypeError(MessageTemplate::kCalledNonCallable, name));
+    }
+    RETURN_RESULT_OR_FAILURE(
+        isolate, Execution::Call(isolate, function, receiver_obj, 0, NULL));
+  }
+}
 
 // static
 void Builtins::Generate_DatePrototypeGetDate(MacroAssembler* masm) {
@@ -4026,10 +4330,9 @@
 namespace {
 
 // ES6 section 19.2.1.1.1 CreateDynamicFunction
-MaybeHandle<JSFunction> CreateDynamicFunction(
-    Isolate* isolate,
-    BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget> args,
-    const char* token) {
+MaybeHandle<JSFunction> CreateDynamicFunction(Isolate* isolate,
+                                              BuiltinArguments args,
+                                              const char* token) {
   // Compute number of arguments, ignoring the receiver.
   DCHECK_LE(1, args.length());
   int const argc = args.length() - 1;
@@ -4115,7 +4418,7 @@
   // function has wrong initial map. To fix that we create a new
   // function object with correct initial map.
   Handle<Object> unchecked_new_target = args.new_target();
-  if (!unchecked_new_target->IsUndefined() &&
+  if (!unchecked_new_target->IsUndefined(isolate) &&
       !unchecked_new_target.is_identical_to(target)) {
     Handle<JSReceiver> new_target =
         Handle<JSReceiver>::cast(unchecked_new_target);
@@ -4147,9 +4450,9 @@
   return *result;
 }
 
+namespace {
 
-// ES6 section 19.2.3.2 Function.prototype.bind ( thisArg, ...args )
-BUILTIN(FunctionPrototypeBind) {
+Object* DoFunctionBind(Isolate* isolate, BuiltinArguments args) {
   HandleScope scope(isolate);
   DCHECK_LE(1, args.length());
   if (!args.receiver()->IsCallable()) {
@@ -4233,6 +4536,22 @@
   return *function;
 }
 
+}  // namespace
+
+// ES6 section 19.2.3.2 Function.prototype.bind ( thisArg, ...args )
+BUILTIN(FunctionPrototypeBind) { return DoFunctionBind(isolate, args); }
+
+// TODO(verwaest): This is a temporary helper until the FastFunctionBind stub
+// can tailcall to the builtin directly.
+RUNTIME_FUNCTION(Runtime_FunctionBind) {
+  DCHECK_EQ(2, args.length());
+  Arguments* incoming = reinterpret_cast<Arguments*>(args[0]);
+  // Rewrap the arguments as builtins arguments.
+  BuiltinArguments caller_args(incoming->length() + 3,
+                               incoming->arguments() + 1);
+  return DoFunctionBind(isolate, caller_args);
+}
+
 // ES6 section 19.2.3.5 Function.prototype.toString ( )
 BUILTIN(FunctionPrototypeToString) {
   HandleScope scope(isolate);
@@ -4252,18 +4571,23 @@
 // ES6 section 25.2.1.1 GeneratorFunction (p1, p2, ... , pn, body)
 BUILTIN(GeneratorFunctionConstructor) {
   HandleScope scope(isolate);
-  Handle<JSFunction> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, CreateDynamicFunction(isolate, args, "function*"));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           CreateDynamicFunction(isolate, args, "function*"));
 }
 
 BUILTIN(AsyncFunctionConstructor) {
   HandleScope scope(isolate);
-  Handle<JSFunction> result;
+  Handle<JSFunction> func;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, CreateDynamicFunction(isolate, args, "async function"));
-  return *result;
+      isolate, func, CreateDynamicFunction(isolate, args, "async function"));
+
+  // Do not lazily compute eval position for AsyncFunction, as they may not be
+  // determined after the function is resumed.
+  Handle<Script> script = handle(Script::cast(func->shared()->script()));
+  int position = script->GetEvalPosition();
+  USE(position);
+
+  return *func;
 }
 
 // ES6 section 19.4.1.1 Symbol ( [ description ] ) for the [[Call]] case.
@@ -4271,7 +4595,7 @@
   HandleScope scope(isolate);
   Handle<Symbol> result = isolate->factory()->NewSymbol();
   Handle<Object> description = args.atOrUndefined(isolate, 1);
-  if (!description->IsUndefined()) {
+  if (!description->IsUndefined(isolate)) {
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, description,
                                        Object::ToString(isolate, description));
     result->set_name(*description);
@@ -4293,75 +4617,290 @@
 BUILTIN(ObjectProtoToString) {
   HandleScope scope(isolate);
   Handle<Object> object = args.at<Object>(0);
-  Handle<String> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Object::ObjectProtoToString(isolate, object));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           Object::ObjectProtoToString(isolate, object));
 }
 
 // -----------------------------------------------------------------------------
 // ES6 section 21.1 String Objects
 
-namespace {
+// ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
+void Builtins::Generate_StringFromCharCode(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
 
-bool ToUint16(Handle<Object> value, uint16_t* result) {
-  if (value->IsNumber() || Object::ToNumber(value).ToHandle(&value)) {
-    *result = DoubleToUint32(value->Number());
-    return true;
+  Node* code = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+
+  // Check if we have exactly one argument (plus the implicit receiver), i.e.
+  // if the parent frame is not an arguments adaptor frame.
+  Label if_oneargument(assembler), if_notoneargument(assembler);
+  Node* parent_frame_pointer = assembler->LoadParentFramePointer();
+  Node* parent_frame_type =
+      assembler->Load(MachineType::Pointer(), parent_frame_pointer,
+                      assembler->IntPtrConstant(
+                          CommonFrameConstants::kContextOrFrameTypeOffset));
+  assembler->Branch(
+      assembler->WordEqual(
+          parent_frame_type,
+          assembler->SmiConstant(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))),
+      &if_notoneargument, &if_oneargument);
+
+  assembler->Bind(&if_oneargument);
+  {
+    // Single argument case, perform fast single character string cache lookup
+    // for one-byte code units, or fall back to creating a single character
+    // string on the fly otherwise.
+    Node* code32 = assembler->TruncateTaggedToWord32(context, code);
+    Node* code16 = assembler->Word32And(
+        code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
+    Node* result = assembler->StringFromCharCode(code16);
+    assembler->Return(result);
   }
-  return false;
+
+  assembler->Bind(&if_notoneargument);
+  {
+    // Determine the resulting string length.
+    Node* parent_frame_length =
+        assembler->Load(MachineType::Pointer(), parent_frame_pointer,
+                        assembler->IntPtrConstant(
+                            ArgumentsAdaptorFrameConstants::kLengthOffset));
+    Node* length = assembler->SmiToWord(parent_frame_length);
+
+    // Assume that the resulting string contains only one-byte characters.
+    Node* result = assembler->AllocateSeqOneByteString(context, length);
+
+    // Truncate all input parameters and append them to the resulting string.
+    Variable var_offset(assembler, MachineType::PointerRepresentation());
+    Label loop(assembler, &var_offset), done_loop(assembler);
+    var_offset.Bind(assembler->IntPtrConstant(0));
+    assembler->Goto(&loop);
+    assembler->Bind(&loop);
+    {
+      // Load the current {offset}.
+      Node* offset = var_offset.value();
+
+      // Check if we're done with the string.
+      assembler->GotoIf(assembler->WordEqual(offset, length), &done_loop);
+
+      // Load the next code point and truncate it to a 16-bit value.
+      Node* code = assembler->Load(
+          MachineType::AnyTagged(), parent_frame_pointer,
+          assembler->IntPtrAdd(
+              assembler->WordShl(assembler->IntPtrSub(length, offset),
+                                 assembler->IntPtrConstant(kPointerSizeLog2)),
+              assembler->IntPtrConstant(
+                  CommonFrameConstants::kFixedFrameSizeAboveFp -
+                  kPointerSize)));
+      Node* code32 = assembler->TruncateTaggedToWord32(context, code);
+      Node* code16 = assembler->Word32And(
+          code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
+
+      // Check if {code16} fits into a one-byte string.
+      Label if_codeisonebyte(assembler), if_codeistwobyte(assembler);
+      assembler->Branch(
+          assembler->Int32LessThanOrEqual(
+              code16, assembler->Int32Constant(String::kMaxOneByteCharCode)),
+          &if_codeisonebyte, &if_codeistwobyte);
+
+      assembler->Bind(&if_codeisonebyte);
+      {
+        // The {code16} fits into the SeqOneByteString {result}.
+        assembler->StoreNoWriteBarrier(
+            MachineRepresentation::kWord8, result,
+            assembler->IntPtrAdd(
+                assembler->IntPtrConstant(SeqOneByteString::kHeaderSize -
+                                          kHeapObjectTag),
+                offset),
+            code16);
+        var_offset.Bind(
+            assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
+        assembler->Goto(&loop);
+      }
+
+      assembler->Bind(&if_codeistwobyte);
+      {
+        // Allocate a SeqTwoByteString to hold the resulting string.
+        Node* cresult = assembler->AllocateSeqTwoByteString(context, length);
+
+        // Copy all characters that were previously written to the
+        // SeqOneByteString in {result} over to the new {cresult}.
+        Variable var_coffset(assembler, MachineType::PointerRepresentation());
+        Label cloop(assembler, &var_coffset), done_cloop(assembler);
+        var_coffset.Bind(assembler->IntPtrConstant(0));
+        assembler->Goto(&cloop);
+        assembler->Bind(&cloop);
+        {
+          Node* coffset = var_coffset.value();
+          assembler->GotoIf(assembler->WordEqual(coffset, offset), &done_cloop);
+          Node* ccode = assembler->Load(
+              MachineType::Uint8(), result,
+              assembler->IntPtrAdd(
+                  assembler->IntPtrConstant(SeqOneByteString::kHeaderSize -
+                                            kHeapObjectTag),
+                  coffset));
+          assembler->StoreNoWriteBarrier(
+              MachineRepresentation::kWord16, cresult,
+              assembler->IntPtrAdd(
+                  assembler->IntPtrConstant(SeqTwoByteString::kHeaderSize -
+                                            kHeapObjectTag),
+                  assembler->WordShl(coffset, 1)),
+              ccode);
+          var_coffset.Bind(
+              assembler->IntPtrAdd(coffset, assembler->IntPtrConstant(1)));
+          assembler->Goto(&cloop);
+        }
+
+        // Write the pending {code16} to {offset}.
+        assembler->Bind(&done_cloop);
+        assembler->StoreNoWriteBarrier(
+            MachineRepresentation::kWord16, cresult,
+            assembler->IntPtrAdd(
+                assembler->IntPtrConstant(SeqTwoByteString::kHeaderSize -
+                                          kHeapObjectTag),
+                assembler->WordShl(offset, 1)),
+            code16);
+
+        // Copy the remaining parameters to the SeqTwoByteString {cresult}.
+        Label floop(assembler, &var_offset), done_floop(assembler);
+        assembler->Goto(&floop);
+        assembler->Bind(&floop);
+        {
+          // Compute the next {offset}.
+          Node* offset = assembler->IntPtrAdd(var_offset.value(),
+                                              assembler->IntPtrConstant(1));
+
+          // Check if we're done with the string.
+          assembler->GotoIf(assembler->WordEqual(offset, length), &done_floop);
+
+          // Load the next code point and truncate it to a 16-bit value.
+          Node* code = assembler->Load(
+              MachineType::AnyTagged(), parent_frame_pointer,
+              assembler->IntPtrAdd(
+                  assembler->WordShl(
+                      assembler->IntPtrSub(length, offset),
+                      assembler->IntPtrConstant(kPointerSizeLog2)),
+                  assembler->IntPtrConstant(
+                      CommonFrameConstants::kFixedFrameSizeAboveFp -
+                      kPointerSize)));
+          Node* code32 = assembler->TruncateTaggedToWord32(context, code);
+          Node* code16 = assembler->Word32And(
+              code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
+
+          // Store the truncated {code} point at the next offset.
+          assembler->StoreNoWriteBarrier(
+              MachineRepresentation::kWord16, cresult,
+              assembler->IntPtrAdd(
+                  assembler->IntPtrConstant(SeqTwoByteString::kHeaderSize -
+                                            kHeapObjectTag),
+                  assembler->WordShl(offset, 1)),
+              code16);
+          var_offset.Bind(offset);
+          assembler->Goto(&floop);
+        }
+
+        // Return the SeqTwoByteString.
+        assembler->Bind(&done_floop);
+        assembler->Return(cresult);
+      }
+    }
+
+    assembler->Bind(&done_loop);
+    assembler->Return(result);
+  }
+}
+
+namespace {  // for String.fromCodePoint
+
+bool IsValidCodePoint(Isolate* isolate, Handle<Object> value) {
+  if (!value->IsNumber() && !Object::ToNumber(value).ToHandle(&value)) {
+    return false;
+  }
+
+  if (Object::ToInteger(isolate, value).ToHandleChecked()->Number() !=
+      value->Number()) {
+    return false;
+  }
+
+  if (value->Number() < 0 || value->Number() > 0x10FFFF) {
+    return false;
+  }
+
+  return true;
+}
+
+uc32 NextCodePoint(Isolate* isolate, BuiltinArguments args, int index) {
+  Handle<Object> value = args.at<Object>(1 + index);
+  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, value, Object::ToNumber(value), -1);
+  if (!IsValidCodePoint(isolate, value)) {
+    isolate->Throw(*isolate->factory()->NewRangeError(
+        MessageTemplate::kInvalidCodePoint, value));
+    return -1;
+  }
+  return DoubleToUint32(value->Number());
 }
 
 }  // namespace
 
-// ES6 21.1.2.1 String.fromCharCode ( ...codeUnits )
-BUILTIN(StringFromCharCode) {
+// ES6 section 21.1.2.2 String.fromCodePoint ( ...codePoints )
+BUILTIN(StringFromCodePoint) {
   HandleScope scope(isolate);
-  // Check resulting string length.
-  int index = 0;
-  Handle<String> result;
   int const length = args.length() - 1;
   if (length == 0) return isolate->heap()->empty_string();
   DCHECK_LT(0, length);
-  // Load the first character code.
-  uint16_t code;
-  if (!ToUint16(args.at<Object>(1), &code)) return isolate->heap()->exception();
-  // Assume that the resulting String contains only one byte characters.
-  if (code <= String::kMaxOneByteCharCodeU) {
-    // Check for single one-byte character fast case.
-    if (length == 1) {
-      return *isolate->factory()->LookupSingleCharacterStringFromCode(code);
+
+  // Optimistically assume that the resulting String contains only one byte
+  // characters.
+  List<uint8_t> one_byte_buffer(length);
+  uc32 code = 0;
+  int index;
+  for (index = 0; index < length; index++) {
+    code = NextCodePoint(isolate, args, index);
+    if (code < 0) {
+      return isolate->heap()->exception();
     }
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result, isolate->factory()->NewRawOneByteString(length));
-    do {
-      Handle<SeqOneByteString>::cast(result)->Set(index, code);
-      if (++index == length) break;
-      if (!ToUint16(args.at<Object>(1 + index), &code)) {
-        return isolate->heap()->exception();
-      }
-    } while (code <= String::kMaxOneByteCharCodeU);
+    if (code > String::kMaxOneByteCharCode) {
+      break;
+    }
+    one_byte_buffer.Add(code);
   }
-  // Check if all characters fit into the one byte range.
-  if (index < length) {
-    // Fallback to two byte string.
-    Handle<String> new_result;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, new_result, isolate->factory()->NewRawTwoByteString(length));
-    for (int new_index = 0; new_index < index; ++new_index) {
-      uint16_t new_code =
-          Handle<SeqOneByteString>::cast(result)->Get(new_index);
-      Handle<SeqTwoByteString>::cast(new_result)->Set(new_index, new_code);
-    }
-    while (true) {
-      Handle<SeqTwoByteString>::cast(new_result)->Set(index, code);
-      if (++index == length) break;
-      if (!ToUint16(args.at<Object>(1 + index), &code)) {
-        return isolate->heap()->exception();
-      }
-    }
-    result = new_result;
+
+  if (index == length) {
+    RETURN_RESULT_OR_FAILURE(isolate, isolate->factory()->NewStringFromOneByte(
+                                          one_byte_buffer.ToConstVector()));
   }
+
+  List<uc16> two_byte_buffer(length - index);
+
+  while (true) {
+    if (code <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+      two_byte_buffer.Add(code);
+    } else {
+      two_byte_buffer.Add(unibrow::Utf16::LeadSurrogate(code));
+      two_byte_buffer.Add(unibrow::Utf16::TrailSurrogate(code));
+    }
+
+    if (++index == length) {
+      break;
+    }
+    code = NextCodePoint(isolate, args, index);
+    if (code < 0) {
+      return isolate->heap()->exception();
+    }
+  }
+
+  Handle<SeqTwoByteString> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      isolate->factory()->NewRawTwoByteString(one_byte_buffer.length() +
+                                              two_byte_buffer.length()));
+
+  CopyChars(result->GetChars(), one_byte_buffer.ToConstVector().start(),
+            one_byte_buffer.length());
+  CopyChars(result->GetChars() + one_byte_buffer.length(),
+            two_byte_buffer.ToConstVector().start(), two_byte_buffer.length());
+
   return *result;
 }
 
@@ -4552,6 +5091,27 @@
   assembler->Return(result);
 }
 
+// ES6 section 21.1.3.25 String.prototype.trim ()
+BUILTIN(StringPrototypeTrim) {
+  HandleScope scope(isolate);
+  TO_THIS_STRING(string, "String.prototype.trim");
+  return *String::Trim(string, String::kTrim);
+}
+
+// Non-standard WebKit extension
+BUILTIN(StringPrototypeTrimLeft) {
+  HandleScope scope(isolate);
+  TO_THIS_STRING(string, "String.prototype.trimLeft");
+  return *String::Trim(string, String::kTrimLeft);
+}
+
+// Non-standard WebKit extension
+BUILTIN(StringPrototypeTrimRight) {
+  HandleScope scope(isolate);
+  TO_THIS_STRING(string, "String.prototype.trimRight");
+  return *String::Trim(string, String::kTrimRight);
+}
+
 // -----------------------------------------------------------------------------
 // ES6 section 21.1 ArrayBuffer Objects
 
@@ -4629,10 +5189,7 @@
   DCHECK(isolate->proxy_function()->IsConstructor());
   Handle<Object> target = args.atOrUndefined(isolate, 1);
   Handle<Object> handler = args.atOrUndefined(isolate, 2);
-  Handle<JSProxy> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     JSProxy::New(isolate, target, handler));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, JSProxy::New(isolate, target, handler));
 }
 
 
@@ -4661,25 +5218,38 @@
 
 namespace {
 
+// Returns the holder JSObject if the function can legally be called with this
+// receiver.  Returns nullptr if the call is illegal.
+// TODO(dcarney): CallOptimization duplicates this logic, merge.
+JSObject* GetCompatibleReceiver(Isolate* isolate, FunctionTemplateInfo* info,
+                                JSObject* receiver) {
+  Object* recv_type = info->signature();
+  // No signature, return holder.
+  if (!recv_type->IsFunctionTemplateInfo()) return receiver;
+  FunctionTemplateInfo* signature = FunctionTemplateInfo::cast(recv_type);
+
+  // Check the receiver. Fast path for receivers with no hidden prototypes.
+  if (signature->IsTemplateFor(receiver)) return receiver;
+  if (!receiver->map()->has_hidden_prototype()) return nullptr;
+  for (PrototypeIterator iter(isolate, receiver, kStartAtPrototype,
+                              PrototypeIterator::END_AT_NON_HIDDEN);
+       !iter.IsAtEnd(); iter.Advance()) {
+    JSObject* current = iter.GetCurrent<JSObject>();
+    if (signature->IsTemplateFor(current)) return current;
+  }
+  return nullptr;
+}
+
+template <bool is_construct>
 MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
-    Isolate* isolate,
-    BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget> args) {
-  HandleScope scope(isolate);
-  Handle<HeapObject> function = args.target<HeapObject>();
-  Handle<HeapObject> new_target = args.new_target();
-  bool is_construct = !new_target->IsUndefined();
-  Handle<JSReceiver> receiver;
-
-  DCHECK(function->IsFunctionTemplateInfo() ||
-         Handle<JSFunction>::cast(function)->shared()->IsApiFunction());
-
-  Handle<FunctionTemplateInfo> fun_data =
-      function->IsFunctionTemplateInfo()
-          ? Handle<FunctionTemplateInfo>::cast(function)
-          : handle(JSFunction::cast(*function)->shared()->get_api_func_data());
+    Isolate* isolate, Handle<HeapObject> function,
+    Handle<HeapObject> new_target, Handle<FunctionTemplateInfo> fun_data,
+    Handle<Object> receiver, BuiltinArguments args) {
+  Handle<JSObject> js_receiver;
+  JSObject* raw_holder;
   if (is_construct) {
-    DCHECK(args.receiver()->IsTheHole());
-    if (fun_data->instance_template()->IsUndefined()) {
+    DCHECK(args.receiver()->IsTheHole(isolate));
+    if (fun_data->instance_template()->IsUndefined(isolate)) {
       v8::Local<ObjectTemplate> templ =
           ObjectTemplate::New(reinterpret_cast<v8::Isolate*>(isolate),
                               ToApiHandle<v8::FunctionTemplate>(fun_data));
@@ -4688,37 +5258,43 @@
     Handle<ObjectTemplateInfo> instance_template(
         ObjectTemplateInfo::cast(fun_data->instance_template()), isolate);
     ASSIGN_RETURN_ON_EXCEPTION(
-        isolate, receiver,
+        isolate, js_receiver,
         ApiNatives::InstantiateObject(instance_template,
                                       Handle<JSReceiver>::cast(new_target)),
         Object);
-    args[0] = *receiver;
-    DCHECK_EQ(*receiver, *args.receiver());
-  } else {
-    DCHECK(args.receiver()->IsJSReceiver());
-    receiver = args.at<JSReceiver>(0);
-  }
+    args[0] = *js_receiver;
+    DCHECK_EQ(*js_receiver, *args.receiver());
 
-  if (!is_construct && !fun_data->accept_any_receiver()) {
-    if (receiver->IsJSObject() && receiver->IsAccessCheckNeeded()) {
-      Handle<JSObject> js_receiver = Handle<JSObject>::cast(receiver);
-      if (!isolate->MayAccess(handle(isolate->context()), js_receiver)) {
-        isolate->ReportFailedAccessCheck(js_receiver);
-        RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
-      }
+    raw_holder = *js_receiver;
+  } else {
+    DCHECK(receiver->IsJSReceiver());
+
+    if (!receiver->IsJSObject()) {
+      // This function cannot be called with the given receiver.  Abort!
+      THROW_NEW_ERROR(
+          isolate, NewTypeError(MessageTemplate::kIllegalInvocation), Object);
+    }
+
+    js_receiver = Handle<JSObject>::cast(receiver);
+
+    if (!fun_data->accept_any_receiver() &&
+        js_receiver->IsAccessCheckNeeded() &&
+        !isolate->MayAccess(handle(isolate->context()), js_receiver)) {
+      isolate->ReportFailedAccessCheck(js_receiver);
+      RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+    }
+
+    raw_holder = GetCompatibleReceiver(isolate, *fun_data, *js_receiver);
+
+    if (raw_holder == nullptr) {
+      // This function cannot be called with the given receiver.  Abort!
+      THROW_NEW_ERROR(
+          isolate, NewTypeError(MessageTemplate::kIllegalInvocation), Object);
     }
   }
 
-  Object* raw_holder = fun_data->GetCompatibleReceiver(isolate, *receiver);
-
-  if (raw_holder->IsNull()) {
-    // This function cannot be called with the given receiver.  Abort!
-    THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIllegalInvocation),
-                    Object);
-  }
-
   Object* raw_call_data = fun_data->call_code();
-  if (!raw_call_data->IsUndefined()) {
+  if (!raw_call_data->IsUndefined(isolate)) {
     DCHECK(raw_call_data->IsCallHandlerInfo());
     CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
     Object* callback_obj = call_data->callback();
@@ -4726,23 +5302,25 @@
         v8::ToCData<v8::FunctionCallback>(callback_obj);
     Object* data_obj = call_data->data();
 
-    LOG(isolate, ApiObjectAccess("call", JSObject::cast(*args.receiver())));
-    DCHECK(raw_holder->IsJSObject());
+    LOG(isolate, ApiObjectAccess("call", JSObject::cast(*js_receiver)));
 
     FunctionCallbackArguments custom(isolate, data_obj, *function, raw_holder,
                                      *new_target, &args[0] - 1,
                                      args.length() - 1);
 
     Handle<Object> result = custom.Call(callback);
-    if (result.is_null()) result = isolate->factory()->undefined_value();
 
     RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
-    if (!is_construct || result->IsJSObject()) {
-      return scope.CloseAndEscape(result);
+    if (result.is_null()) {
+      if (is_construct) return js_receiver;
+      return isolate->factory()->undefined_value();
     }
+    // Rebox the result.
+    result->VerifyApiCallResultType();
+    if (!is_construct || result->IsJSObject()) return handle(*result, isolate);
   }
 
-  return scope.CloseAndEscape(receiver);
+  return js_receiver;
 }
 
 }  // namespace
@@ -4750,10 +5328,20 @@
 
 BUILTIN(HandleApiCall) {
   HandleScope scope(isolate);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     HandleApiCallHelper(isolate, args));
-  return *result;
+  Handle<JSFunction> function = args.target<JSFunction>();
+  Handle<Object> receiver = args.receiver();
+  Handle<HeapObject> new_target = args.new_target();
+  Handle<FunctionTemplateInfo> fun_data(function->shared()->get_api_func_data(),
+                                        isolate);
+  if (new_target->IsJSReceiver()) {
+    RETURN_RESULT_OR_FAILURE(
+        isolate, HandleApiCallHelper<true>(isolate, function, new_target,
+                                           fun_data, receiver, args));
+  } else {
+    RETURN_RESULT_OR_FAILURE(
+        isolate, HandleApiCallHelper<false>(isolate, function, new_target,
+                                            fun_data, receiver, args));
+  }
 }
 
 
@@ -4837,14 +5425,10 @@
 
 namespace {
 
-class RelocatableArguments
-    : public BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget>,
-      public Relocatable {
+class RelocatableArguments : public BuiltinArguments, public Relocatable {
  public:
   RelocatableArguments(Isolate* isolate, int length, Object** arguments)
-      : BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget>(length,
-                                                                     arguments),
-        Relocatable(isolate) {}
+      : BuiltinArguments(length, arguments), Relocatable(isolate) {}
 
   virtual inline void IterateInstance(ObjectVisitor* v) {
     if (length() == 0) return;
@@ -4857,14 +5441,17 @@
 
 }  // namespace
 
-MaybeHandle<Object> Builtins::InvokeApiFunction(Handle<HeapObject> function,
+MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
+                                                Handle<HeapObject> function,
                                                 Handle<Object> receiver,
                                                 int argc,
                                                 Handle<Object> args[]) {
-  Isolate* isolate = function->GetIsolate();
+  DCHECK(function->IsFunctionTemplateInfo() ||
+         (function->IsJSFunction() &&
+          JSFunction::cast(*function)->shared()->IsApiFunction()));
+
   // Do proper receiver conversion for non-strict mode api functions.
   if (!receiver->IsJSReceiver()) {
-    DCHECK(function->IsFunctionTemplateInfo() || function->IsJSFunction());
     if (function->IsFunctionTemplateInfo() ||
         is_sloppy(JSFunction::cast(*function)->shared()->language_mode())) {
       ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
@@ -4872,6 +5459,13 @@
                                  Object);
     }
   }
+
+  Handle<FunctionTemplateInfo> fun_data =
+      function->IsFunctionTemplateInfo()
+          ? Handle<FunctionTemplateInfo>::cast(function)
+          : handle(JSFunction::cast(*function)->shared()->get_api_func_data(),
+                   isolate);
+  Handle<HeapObject> new_target = isolate->factory()->undefined_value();
   // Construct BuiltinArguments object:
   // new target, function, arguments reversed, receiver.
   const int kBufferSize = 32;
@@ -4887,15 +5481,14 @@
     argv[argc - i + 1] = *args[i];
   }
   argv[1] = *function;
-  argv[0] = isolate->heap()->undefined_value();  // new target
+  argv[0] = *new_target;
   MaybeHandle<Object> result;
   {
     RelocatableArguments arguments(isolate, argc + 3, &argv[argc] + 2);
-    result = HandleApiCallHelper(isolate, arguments);
+    result = HandleApiCallHelper<false>(isolate, function, new_target, fun_data,
+                                        receiver, arguments);
   }
-  if (argv != small_argv) {
-    delete[] argv;
-  }
+  if (argv != small_argv) delete[] argv;
   return result;
 }
 
@@ -4904,8 +5497,7 @@
 // API. The object can be called as either a constructor (using new) or just as
 // a function (without new).
 MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
-    Isolate* isolate, bool is_construct_call,
-    BuiltinArguments<BuiltinExtraArguments::kNone> args) {
+    Isolate* isolate, bool is_construct_call, BuiltinArguments args) {
   Handle<Object> receiver = args.receiver();
 
   // Get the object called.
@@ -4931,7 +5523,7 @@
   CHECK(constructor->shared()->IsApiFunction());
   Object* handler =
       constructor->shared()->get_api_func_data()->instance_call_handler();
-  DCHECK(!handler->IsUndefined());
+  DCHECK(!handler->IsUndefined(isolate));
   // TODO(ishell): remove this debugging code.
   CHECK(handler->IsCallHandlerInfo());
   CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
@@ -4974,106 +5566,122 @@
   return HandleApiCallAsFunctionOrConstructor(isolate, true, args);
 }
 
+namespace {
 
-static void Generate_LoadIC_Miss(MacroAssembler* masm) {
-  LoadIC::GenerateMiss(masm);
+void Generate_LoadIC_Miss(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* name = assembler->Parameter(1);
+  Node* slot = assembler->Parameter(2);
+  Node* vector = assembler->Parameter(3);
+  Node* context = assembler->Parameter(4);
+
+  assembler->TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name,
+                             slot, vector);
 }
 
+void Generate_LoadGlobalIC_Miss(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
 
-static void Generate_LoadIC_Normal(MacroAssembler* masm) {
+  Node* slot = assembler->Parameter(0);
+  Node* vector = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  assembler->TailCallRuntime(Runtime::kLoadGlobalIC_Miss, context, slot,
+                             vector);
+}
+
+void Generate_LoadIC_Normal(MacroAssembler* masm) {
   LoadIC::GenerateNormal(masm);
 }
 
-
-static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
+void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
   NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(masm);
 }
 
+void Generate_LoadIC_Slow(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
 
-static void Generate_LoadIC_Slow(MacroAssembler* masm) {
-  LoadIC::GenerateRuntimeGetProperty(masm);
+  Node* receiver = assembler->Parameter(0);
+  Node* name = assembler->Parameter(1);
+  // Node* slot = assembler->Parameter(2);
+  // Node* vector = assembler->Parameter(3);
+  Node* context = assembler->Parameter(4);
+
+  assembler->TailCallRuntime(Runtime::kGetProperty, context, receiver, name);
 }
 
+void Generate_LoadGlobalIC_Slow(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
 
-static void Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
+  Node* slot = assembler->Parameter(0);
+  Node* vector = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  assembler->TailCallRuntime(Runtime::kLoadGlobalIC_Slow, context, slot,
+                             vector);
+}
+
+void Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
   KeyedLoadIC::GenerateRuntimeGetProperty(masm);
 }
 
-
-static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
+void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
   KeyedLoadIC::GenerateMiss(masm);
 }
 
-
-static void Generate_KeyedLoadIC_Megamorphic(MacroAssembler* masm) {
+void Generate_KeyedLoadIC_Megamorphic(MacroAssembler* masm) {
   KeyedLoadIC::GenerateMegamorphic(masm);
 }
 
-
-static void Generate_StoreIC_Miss(MacroAssembler* masm) {
+void Generate_StoreIC_Miss(MacroAssembler* masm) {
   StoreIC::GenerateMiss(masm);
 }
 
-
-static void Generate_StoreIC_Normal(MacroAssembler* masm) {
+void Generate_StoreIC_Normal(MacroAssembler* masm) {
   StoreIC::GenerateNormal(masm);
 }
 
-
-static void Generate_StoreIC_Slow(MacroAssembler* masm) {
+void Generate_StoreIC_Slow(MacroAssembler* masm) {
   NamedStoreHandlerCompiler::GenerateSlow(masm);
 }
 
-
-static void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
+void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
   ElementHandlerCompiler::GenerateStoreSlow(masm);
 }
 
-
-static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
+void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
   NamedStoreHandlerCompiler::GenerateStoreViaSetterForDeopt(masm);
 }
 
-static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
-  StoreIC::GenerateMegamorphic(masm);
-}
-
-static void Generate_StoreIC_Megamorphic_Strict(MacroAssembler* masm) {
-  StoreIC::GenerateMegamorphic(masm);
-}
-
-
-static void Generate_KeyedStoreIC_Megamorphic(MacroAssembler* masm) {
+void Generate_KeyedStoreIC_Megamorphic(MacroAssembler* masm) {
   KeyedStoreIC::GenerateMegamorphic(masm, SLOPPY);
 }
 
-
-static void Generate_KeyedStoreIC_Megamorphic_Strict(MacroAssembler* masm) {
+void Generate_KeyedStoreIC_Megamorphic_Strict(MacroAssembler* masm) {
   KeyedStoreIC::GenerateMegamorphic(masm, STRICT);
 }
 
-
-static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
+void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
   KeyedStoreIC::GenerateMiss(masm);
 }
 
-
-static void Generate_Return_DebugBreak(MacroAssembler* masm) {
+void Generate_Return_DebugBreak(MacroAssembler* masm) {
   DebugCodegen::GenerateDebugBreakStub(masm,
                                        DebugCodegen::SAVE_RESULT_REGISTER);
 }
 
-
-static void Generate_Slot_DebugBreak(MacroAssembler* masm) {
+void Generate_Slot_DebugBreak(MacroAssembler* masm) {
   DebugCodegen::GenerateDebugBreakStub(masm,
                                        DebugCodegen::IGNORE_RESULT_REGISTER);
 }
 
-
-static void Generate_FrameDropper_LiveEdit(MacroAssembler* masm) {
+void Generate_FrameDropper_LiveEdit(MacroAssembler* masm) {
   DebugCodegen::GenerateFrameDropperLiveEdit(masm);
 }
 
+}  // namespace
 
 Builtins::Builtins() : initialized_(false) {
   memset(builtins_, 0, sizeof(builtins_[0]) * builtin_count);
@@ -5084,8 +5692,7 @@
 Builtins::~Builtins() {
 }
 
-
-#define DEF_ENUM_C(name, ignore) FUNCTION_ADDR(Builtin_##name),
+#define DEF_ENUM_C(name) FUNCTION_ADDR(Builtin_##name),
 Address const Builtins::c_functions_[cfunction_count] = {
   BUILTIN_LIST_C(DEF_ENUM_C)
 };
@@ -5099,7 +5706,6 @@
   const char* s_name;  // name is only used for generating log information.
   int name;
   Code::Flags flags;
-  BuiltinExtraArguments extra_args;
   int argc;
 };
 
@@ -5144,13 +5750,13 @@
   MacroAssembler masm(isolate, u.buffer, sizeof(u.buffer),
                       CodeObjectRequired::kYes);
   // Generate the code/adaptor.
-  typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
+  typedef void (*Generator)(MacroAssembler*, int);
   Generator g = FUNCTION_CAST<Generator>(builtin_desc->generator);
   // We pass all arguments to the generator, but it may not use all of
   // them.  This works because the first arguments are on top of the
   // stack.
   DCHECK(!masm.has_frame());
-  g(&masm, builtin_desc->name, builtin_desc->extra_args);
+  g(&masm, builtin_desc->name);
   // Move the code into the object heap.
   CodeDesc desc;
   masm.GetCode(&desc);
@@ -5158,8 +5764,9 @@
   return isolate->factory()->NewCode(desc, flags, masm.CodeObject());
 }
 
-Handle<Code> CodeStubAssemblerBuilder(Isolate* isolate,
-                                      BuiltinDesc const* builtin_desc) {
+// Builder for builtins implemented in TurboFan with JS linkage.
+Handle<Code> CodeStubAssemblerBuilderJS(Isolate* isolate,
+                                        BuiltinDesc const* builtin_desc) {
   Zone zone(isolate->allocator());
   CodeStubAssembler assembler(isolate, &zone, builtin_desc->argc,
                               builtin_desc->flags, builtin_desc->s_name);
@@ -5170,6 +5777,25 @@
   return assembler.GenerateCode();
 }
 
+// Builder for builtins implemented in TurboFan with CallStub linkage.
+Handle<Code> CodeStubAssemblerBuilderCS(Isolate* isolate,
+                                        BuiltinDesc const* builtin_desc) {
+  Zone zone(isolate->allocator());
+  // The interface descriptor with given key must be initialized at this point
+  // and this construction just queries the details from the descriptors table.
+  CallInterfaceDescriptor descriptor(
+      isolate, static_cast<CallDescriptors::Key>(builtin_desc->argc));
+  // Ensure descriptor is already initialized.
+  DCHECK_NOT_NULL(descriptor.GetFunctionType());
+  CodeStubAssembler assembler(isolate, &zone, descriptor, builtin_desc->flags,
+                              builtin_desc->s_name);
+  // Generate the code/adaptor.
+  typedef void (*Generator)(CodeStubAssembler*);
+  Generator g = FUNCTION_CAST<Generator>(builtin_desc->generator);
+  g(&assembler);
+  return assembler.GenerateCode();
+}
+
 }  // namespace
 
 // Define array of pointers to generators and C builtin functions.
@@ -5184,41 +5810,46 @@
   functions[builtin_count].s_name = nullptr;
   functions[builtin_count].name = builtin_count;
   functions[builtin_count].flags = static_cast<Code::Flags>(0);
-  functions[builtin_count].extra_args = BuiltinExtraArguments::kNone;
   functions[builtin_count].argc = 0;
 
-#define DEF_FUNCTION_PTR_C(aname, aextra_args)                \
-  functions->builder = &MacroAssemblerBuilder;                \
-  functions->generator = FUNCTION_ADDR(Generate_Adaptor);     \
-  functions->c_code = FUNCTION_ADDR(Builtin_##aname);         \
-  functions->s_name = #aname;                                 \
-  functions->name = c_##aname;                                \
-  functions->flags = Code::ComputeFlags(Code::BUILTIN);       \
-  functions->extra_args = BuiltinExtraArguments::aextra_args; \
-  functions->argc = 0;                                        \
+#define DEF_FUNCTION_PTR_C(aname)                         \
+  functions->builder = &MacroAssemblerBuilder;            \
+  functions->generator = FUNCTION_ADDR(Generate_Adaptor); \
+  functions->c_code = FUNCTION_ADDR(Builtin_##aname);     \
+  functions->s_name = #aname;                             \
+  functions->name = c_##aname;                            \
+  functions->flags = Code::ComputeFlags(Code::BUILTIN);   \
+  functions->argc = 0;                                    \
   ++functions;
 
-#define DEF_FUNCTION_PTR_A(aname, kind, state, extra)              \
-  functions->builder = &MacroAssemblerBuilder;                     \
-  functions->generator = FUNCTION_ADDR(Generate_##aname);          \
-  functions->c_code = NULL;                                        \
-  functions->s_name = #aname;                                      \
-  functions->name = k##aname;                                      \
-  functions->flags = Code::ComputeFlags(Code::kind, state, extra); \
-  functions->extra_args = BuiltinExtraArguments::kNone;            \
-  functions->argc = 0;                                             \
+#define DEF_FUNCTION_PTR_A(aname, kind, extra)              \
+  functions->builder = &MacroAssemblerBuilder;              \
+  functions->generator = FUNCTION_ADDR(Generate_##aname);   \
+  functions->c_code = NULL;                                 \
+  functions->s_name = #aname;                               \
+  functions->name = k##aname;                               \
+  functions->flags = Code::ComputeFlags(Code::kind, extra); \
+  functions->argc = 0;                                      \
   ++functions;
 
-#define DEF_FUNCTION_PTR_T(aname, aargc)                                 \
-  functions->builder = &CodeStubAssemblerBuilder;                        \
-  functions->generator = FUNCTION_ADDR(Generate_##aname);                \
-  functions->c_code = NULL;                                              \
-  functions->s_name = #aname;                                            \
-  functions->name = k##aname;                                            \
-  functions->flags =                                                     \
-      Code::ComputeFlags(Code::BUILTIN, UNINITIALIZED, kNoExtraICState); \
-  functions->extra_args = BuiltinExtraArguments::kNone;                  \
-  functions->argc = aargc;                                               \
+#define DEF_FUNCTION_PTR_T(aname, aargc)                  \
+  functions->builder = &CodeStubAssemblerBuilderJS;       \
+  functions->generator = FUNCTION_ADDR(Generate_##aname); \
+  functions->c_code = NULL;                               \
+  functions->s_name = #aname;                             \
+  functions->name = k##aname;                             \
+  functions->flags = Code::ComputeFlags(Code::BUILTIN);   \
+  functions->argc = aargc;                                \
+  ++functions;
+
+#define DEF_FUNCTION_PTR_S(aname, kind, extra, interface_descriptor) \
+  functions->builder = &CodeStubAssemblerBuilderCS;                  \
+  functions->generator = FUNCTION_ADDR(Generate_##aname);            \
+  functions->c_code = NULL;                                          \
+  functions->s_name = #aname;                                        \
+  functions->name = k##aname;                                        \
+  functions->flags = Code::ComputeFlags(Code::kind, extra);          \
+  functions->argc = CallDescriptors::interface_descriptor;           \
   ++functions;
 
 #define DEF_FUNCTION_PTR_H(aname, kind)                     \
@@ -5228,20 +5859,21 @@
   functions->s_name = #aname;                               \
   functions->name = k##aname;                               \
   functions->flags = Code::ComputeHandlerFlags(Code::kind); \
-  functions->extra_args = BuiltinExtraArguments::kNone;     \
   functions->argc = 0;                                      \
   ++functions;
 
   BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
   BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
   BUILTIN_LIST_T(DEF_FUNCTION_PTR_T)
+  BUILTIN_LIST_S(DEF_FUNCTION_PTR_S)
   BUILTIN_LIST_H(DEF_FUNCTION_PTR_H)
   BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)
 
 #undef DEF_FUNCTION_PTR_C
 #undef DEF_FUNCTION_PTR_A
-#undef DEF_FUNCTION_PTR_H
 #undef DEF_FUNCTION_PTR_T
+#undef DEF_FUNCTION_PTR_S
+#undef DEF_FUNCTION_PTR_H
 }
 
 
@@ -5251,6 +5883,11 @@
   // Create a scope for the handles in the builtins.
   HandleScope scope(isolate);
 
+#define INITIALIZE_CALL_DESCRIPTOR(name, kind, extra, interface_descriptor) \
+  { interface_descriptor##Descriptor descriptor(isolate); }
+  BUILTIN_LIST_S(INITIALIZE_CALL_DESCRIPTOR)
+#undef INITIALIZE_CALL_DESCRIPTOR
+
   const BuiltinDesc* functions = builtin_function_table.functions();
 
   // Traverse the list of builtins and generate an adaptor in a
@@ -5260,8 +5897,8 @@
       Handle<Code> code = (*functions[i].builder)(isolate, functions + i);
       // Log the event and add the code to the builtins array.
       PROFILE(isolate,
-              CodeCreateEvent(Logger::BUILTIN_TAG, AbstractCode::cast(*code),
-                              functions[i].s_name));
+              CodeCreateEvent(CodeEventListener::BUILTIN_TAG,
+                              AbstractCode::cast(*code), functions[i].s_name));
       builtins_[i] = *code;
       code->set_builtin_index(i);
 #ifdef ENABLE_DISASSEMBLER
@@ -5555,23 +6192,26 @@
   a->Return(a->Int32Constant(0));
 }
 
-#define DEFINE_BUILTIN_ACCESSOR_C(name, ignore)               \
-Handle<Code> Builtins::name() {                               \
-  Code** code_address =                                       \
-      reinterpret_cast<Code**>(builtin_address(k##name));     \
-  return Handle<Code>(code_address);                          \
-}
-#define DEFINE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
-Handle<Code> Builtins::name() {                             \
-  Code** code_address =                                     \
-      reinterpret_cast<Code**>(builtin_address(k##name));   \
-  return Handle<Code>(code_address);                        \
-}
+#define DEFINE_BUILTIN_ACCESSOR_C(name)                                       \
+  Handle<Code> Builtins::name() {                                             \
+    Code** code_address = reinterpret_cast<Code**>(builtin_address(k##name)); \
+    return Handle<Code>(code_address);                                        \
+  }
+#define DEFINE_BUILTIN_ACCESSOR_A(name, kind, extra)                          \
+  Handle<Code> Builtins::name() {                                             \
+    Code** code_address = reinterpret_cast<Code**>(builtin_address(k##name)); \
+    return Handle<Code>(code_address);                                        \
+  }
 #define DEFINE_BUILTIN_ACCESSOR_T(name, argc)                                 \
   Handle<Code> Builtins::name() {                                             \
     Code** code_address = reinterpret_cast<Code**>(builtin_address(k##name)); \
     return Handle<Code>(code_address);                                        \
   }
+#define DEFINE_BUILTIN_ACCESSOR_S(name, kind, extra, interface_descriptor)    \
+  Handle<Code> Builtins::name() {                                             \
+    Code** code_address = reinterpret_cast<Code**>(builtin_address(k##name)); \
+    return Handle<Code>(code_address);                                        \
+  }
 #define DEFINE_BUILTIN_ACCESSOR_H(name, kind)               \
 Handle<Code> Builtins::name() {                             \
   Code** code_address =                                     \
@@ -5581,11 +6221,13 @@
 BUILTIN_LIST_C(DEFINE_BUILTIN_ACCESSOR_C)
 BUILTIN_LIST_A(DEFINE_BUILTIN_ACCESSOR_A)
 BUILTIN_LIST_T(DEFINE_BUILTIN_ACCESSOR_T)
+BUILTIN_LIST_S(DEFINE_BUILTIN_ACCESSOR_S)
 BUILTIN_LIST_H(DEFINE_BUILTIN_ACCESSOR_H)
 BUILTIN_LIST_DEBUG_A(DEFINE_BUILTIN_ACCESSOR_A)
 #undef DEFINE_BUILTIN_ACCESSOR_C
 #undef DEFINE_BUILTIN_ACCESSOR_A
 #undef DEFINE_BUILTIN_ACCESSOR_T
+#undef DEFINE_BUILTIN_ACCESSOR_S
 #undef DEFINE_BUILTIN_ACCESSOR_H
 
 }  // namespace internal
diff --git a/src/builtins.h b/src/builtins.h
index ff1d77d..cbce375 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -1,3 +1,4 @@
+
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
@@ -14,19 +15,6 @@
 // Forward declarations.
 class CodeStubAssembler;
 
-// Specifies extra arguments required by a C++ builtin.
-enum class BuiltinExtraArguments : uint8_t {
-  kNone = 0u,
-  kTarget = 1u << 0,
-  kNewTarget = 1u << 1,
-  kTargetAndNewTarget = kTarget | kNewTarget
-};
-
-inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
-  return static_cast<uint8_t>(lhs) & static_cast<uint8_t>(rhs);
-}
-
-
 #define CODE_AGE_LIST_WITH_ARG(V, A)     \
   V(Quadragenarian, A)                   \
   V(Quinquagenarian, A)                  \
@@ -46,290 +34,317 @@
   V(NoAge)                                         \
   CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
 
-#define DECLARE_CODE_AGE_BUILTIN(C, V)             \
-  V(Make##C##CodeYoungAgainOddMarking, BUILTIN,    \
-    UNINITIALIZED, kNoExtraICState)                \
-  V(Make##C##CodeYoungAgainEvenMarking, BUILTIN,   \
-    UNINITIALIZED, kNoExtraICState)
-
+#define DECLARE_CODE_AGE_BUILTIN(C, V)                           \
+  V(Make##C##CodeYoungAgainOddMarking, BUILTIN, kNoExtraICState) \
+  V(Make##C##CodeYoungAgainEvenMarking, BUILTIN, kNoExtraICState)
 
 // Define list of builtins implemented in C++.
-#define BUILTIN_LIST_C(V)                                      \
-  V(Illegal, kNone)                                            \
-                                                               \
-  V(EmptyFunction, kNone)                                      \
-                                                               \
-  V(ArrayConcat, kNone)                                        \
-  V(ArrayPop, kNone)                                           \
-  V(ArrayPush, kNone)                                          \
-  V(ArrayShift, kNone)                                         \
-  V(ArraySlice, kNone)                                         \
-  V(ArraySplice, kNone)                                        \
-  V(ArrayUnshift, kNone)                                       \
-                                                               \
-  V(ArrayBufferConstructor, kTarget)                           \
-  V(ArrayBufferConstructor_ConstructStub, kTargetAndNewTarget) \
-  V(ArrayBufferIsView, kNone)                                  \
-                                                               \
-  V(BooleanConstructor, kNone)                                 \
-  V(BooleanConstructor_ConstructStub, kTargetAndNewTarget)     \
-  V(BooleanPrototypeToString, kNone)                           \
-  V(BooleanPrototypeValueOf, kNone)                            \
-                                                               \
-  V(DataViewConstructor, kNone)                                \
-  V(DataViewConstructor_ConstructStub, kTargetAndNewTarget)    \
-                                                               \
-  V(DateConstructor, kNone)                                    \
-  V(DateConstructor_ConstructStub, kTargetAndNewTarget)        \
-  V(DateNow, kNone)                                            \
-  V(DateParse, kNone)                                          \
-  V(DateUTC, kNone)                                            \
-  V(DatePrototypeSetDate, kNone)                               \
-  V(DatePrototypeSetFullYear, kNone)                           \
-  V(DatePrototypeSetHours, kNone)                              \
-  V(DatePrototypeSetMilliseconds, kNone)                       \
-  V(DatePrototypeSetMinutes, kNone)                            \
-  V(DatePrototypeSetMonth, kNone)                              \
-  V(DatePrototypeSetSeconds, kNone)                            \
-  V(DatePrototypeSetTime, kNone)                               \
-  V(DatePrototypeSetUTCDate, kNone)                            \
-  V(DatePrototypeSetUTCFullYear, kNone)                        \
-  V(DatePrototypeSetUTCHours, kNone)                           \
-  V(DatePrototypeSetUTCMilliseconds, kNone)                    \
-  V(DatePrototypeSetUTCMinutes, kNone)                         \
-  V(DatePrototypeSetUTCMonth, kNone)                           \
-  V(DatePrototypeSetUTCSeconds, kNone)                         \
-  V(DatePrototypeToDateString, kNone)                          \
-  V(DatePrototypeToISOString, kNone)                           \
-  V(DatePrototypeToPrimitive, kNone)                           \
-  V(DatePrototypeToUTCString, kNone)                           \
-  V(DatePrototypeToString, kNone)                              \
-  V(DatePrototypeToTimeString, kNone)                          \
-  V(DatePrototypeValueOf, kNone)                               \
-  V(DatePrototypeGetYear, kNone)                               \
-  V(DatePrototypeSetYear, kNone)                               \
-                                                               \
-  V(FunctionConstructor, kTargetAndNewTarget)                  \
-  V(FunctionPrototypeBind, kNone)                              \
-  V(FunctionPrototypeToString, kNone)                          \
-                                                               \
-  V(GeneratorFunctionConstructor, kTargetAndNewTarget)         \
-  V(AsyncFunctionConstructor, kTargetAndNewTarget)             \
-                                                               \
-  V(GlobalEncodeURI, kNone)                                    \
-  V(GlobalEncodeURIComponent, kNone)                           \
-                                                               \
-  V(GlobalEval, kTarget)                                       \
-                                                               \
-  V(MathAcos, kNone)                                           \
-  V(MathAsin, kNone)                                           \
-  V(MathAtan, kNone)                                           \
-  V(MathFround, kNone)                                         \
-  V(MathImul, kNone)                                           \
-                                                               \
-  V(ObjectAssign, kNone)                                       \
-  V(ObjectCreate, kNone)                                       \
-  V(ObjectDefineGetter, kNone)                                 \
-  V(ObjectDefineProperties, kNone)                             \
-  V(ObjectDefineProperty, kNone)                               \
-  V(ObjectDefineSetter, kNone)                                 \
-  V(ObjectEntries, kNone)                                      \
-  V(ObjectFreeze, kNone)                                       \
-  V(ObjectGetOwnPropertyDescriptor, kNone)                     \
-  V(ObjectGetOwnPropertyDescriptors, kNone)                    \
-  V(ObjectGetOwnPropertyNames, kNone)                          \
-  V(ObjectGetOwnPropertySymbols, kNone)                        \
-  V(ObjectGetPrototypeOf, kNone)                               \
-  V(ObjectIs, kNone)                                           \
-  V(ObjectIsExtensible, kNone)                                 \
-  V(ObjectIsFrozen, kNone)                                     \
-  V(ObjectIsSealed, kNone)                                     \
-  V(ObjectKeys, kNone)                                         \
-  V(ObjectLookupGetter, kNone)                                 \
-  V(ObjectLookupSetter, kNone)                                 \
-  V(ObjectPreventExtensions, kNone)                            \
-  V(ObjectProtoToString, kNone)                                \
-  V(ObjectSeal, kNone)                                         \
-  V(ObjectValues, kNone)                                       \
-                                                               \
-  V(ProxyConstructor, kNone)                                   \
-  V(ProxyConstructor_ConstructStub, kTarget)                   \
-                                                               \
-  V(ReflectDefineProperty, kNone)                              \
-  V(ReflectDeleteProperty, kNone)                              \
-  V(ReflectGet, kNone)                                         \
-  V(ReflectGetOwnPropertyDescriptor, kNone)                    \
-  V(ReflectGetPrototypeOf, kNone)                              \
-  V(ReflectHas, kNone)                                         \
-  V(ReflectIsExtensible, kNone)                                \
-  V(ReflectOwnKeys, kNone)                                     \
-  V(ReflectPreventExtensions, kNone)                           \
-  V(ReflectSet, kNone)                                         \
-  V(ReflectSetPrototypeOf, kNone)                              \
-                                                               \
-  V(StringFromCharCode, kNone)                                 \
-                                                               \
-  V(SymbolConstructor, kNone)                                  \
-  V(SymbolConstructor_ConstructStub, kTarget)                  \
-                                                               \
-  V(HandleApiCall, kTargetAndNewTarget)                        \
-  V(HandleApiCallAsFunction, kNone)                            \
-  V(HandleApiCallAsConstructor, kNone)                         \
-                                                               \
-  V(RestrictedFunctionPropertiesThrower, kNone)                \
-  V(RestrictedStrictArgumentsPropertiesThrower, kNone)
+#define BUILTIN_LIST_C(V)                 \
+  V(Illegal)                              \
+                                          \
+  V(EmptyFunction)                        \
+                                          \
+  V(ArrayConcat)                          \
+  V(ArrayPop)                             \
+  V(ArrayPush)                            \
+  V(ArrayShift)                           \
+  V(ArraySlice)                           \
+  V(ArraySplice)                          \
+  V(ArrayUnshift)                         \
+                                          \
+  V(ArrayBufferConstructor)               \
+  V(ArrayBufferConstructor_ConstructStub) \
+  V(ArrayBufferIsView)                    \
+                                          \
+  V(BooleanConstructor)                   \
+  V(BooleanConstructor_ConstructStub)     \
+  V(BooleanPrototypeToString)             \
+  V(BooleanPrototypeValueOf)              \
+                                          \
+  V(DataViewConstructor)                  \
+  V(DataViewConstructor_ConstructStub)    \
+  V(DataViewPrototypeGetBuffer)           \
+  V(DataViewPrototypeGetByteLength)       \
+  V(DataViewPrototypeGetByteOffset)       \
+                                          \
+  V(DateConstructor)                      \
+  V(DateConstructor_ConstructStub)        \
+  V(DateNow)                              \
+  V(DateParse)                            \
+  V(DateUTC)                              \
+  V(DatePrototypeSetDate)                 \
+  V(DatePrototypeSetFullYear)             \
+  V(DatePrototypeSetHours)                \
+  V(DatePrototypeSetMilliseconds)         \
+  V(DatePrototypeSetMinutes)              \
+  V(DatePrototypeSetMonth)                \
+  V(DatePrototypeSetSeconds)              \
+  V(DatePrototypeSetTime)                 \
+  V(DatePrototypeSetUTCDate)              \
+  V(DatePrototypeSetUTCFullYear)          \
+  V(DatePrototypeSetUTCHours)             \
+  V(DatePrototypeSetUTCMilliseconds)      \
+  V(DatePrototypeSetUTCMinutes)           \
+  V(DatePrototypeSetUTCMonth)             \
+  V(DatePrototypeSetUTCSeconds)           \
+  V(DatePrototypeToDateString)            \
+  V(DatePrototypeToISOString)             \
+  V(DatePrototypeToPrimitive)             \
+  V(DatePrototypeToUTCString)             \
+  V(DatePrototypeToString)                \
+  V(DatePrototypeToTimeString)            \
+  V(DatePrototypeValueOf)                 \
+  V(DatePrototypeGetYear)                 \
+  V(DatePrototypeSetYear)                 \
+  V(DatePrototypeToJson)                  \
+                                          \
+  V(FunctionConstructor)                  \
+  V(FunctionPrototypeBind)                \
+  V(FunctionPrototypeToString)            \
+                                          \
+  V(GeneratorFunctionConstructor)         \
+  V(AsyncFunctionConstructor)             \
+                                          \
+  V(GlobalDecodeURI)                      \
+  V(GlobalDecodeURIComponent)             \
+  V(GlobalEncodeURI)                      \
+  V(GlobalEncodeURIComponent)             \
+  V(GlobalEscape)                         \
+  V(GlobalUnescape)                       \
+                                          \
+  V(GlobalEval)                           \
+                                          \
+  V(JsonParse)                            \
+  V(JsonStringify)                        \
+                                          \
+  V(MathAcos)                             \
+  V(MathAsin)                             \
+  V(MathFround)                           \
+  V(MathImul)                             \
+                                          \
+  V(ObjectAssign)                         \
+  V(ObjectCreate)                         \
+  V(ObjectDefineGetter)                   \
+  V(ObjectDefineProperties)               \
+  V(ObjectDefineProperty)                 \
+  V(ObjectDefineSetter)                   \
+  V(ObjectEntries)                        \
+  V(ObjectFreeze)                         \
+  V(ObjectGetOwnPropertyDescriptor)       \
+  V(ObjectGetOwnPropertyDescriptors)      \
+  V(ObjectGetOwnPropertyNames)            \
+  V(ObjectGetOwnPropertySymbols)          \
+  V(ObjectGetPrototypeOf)                 \
+  V(ObjectIs)                             \
+  V(ObjectIsExtensible)                   \
+  V(ObjectIsFrozen)                       \
+  V(ObjectIsSealed)                       \
+  V(ObjectKeys)                           \
+  V(ObjectLookupGetter)                   \
+  V(ObjectLookupSetter)                   \
+  V(ObjectPreventExtensions)              \
+  V(ObjectProtoToString)                  \
+  V(ObjectSeal)                           \
+  V(ObjectValues)                         \
+                                          \
+  V(ProxyConstructor)                     \
+  V(ProxyConstructor_ConstructStub)       \
+                                          \
+  V(ReflectDefineProperty)                \
+  V(ReflectDeleteProperty)                \
+  V(ReflectGet)                           \
+  V(ReflectGetOwnPropertyDescriptor)      \
+  V(ReflectGetPrototypeOf)                \
+  V(ReflectHas)                           \
+  V(ReflectIsExtensible)                  \
+  V(ReflectOwnKeys)                       \
+  V(ReflectPreventExtensions)             \
+  V(ReflectSet)                           \
+  V(ReflectSetPrototypeOf)                \
+                                          \
+  V(StringFromCodePoint)                  \
+                                          \
+  V(StringPrototypeTrim)                  \
+  V(StringPrototypeTrimLeft)              \
+  V(StringPrototypeTrimRight)             \
+                                          \
+  V(SymbolConstructor)                    \
+  V(SymbolConstructor_ConstructStub)      \
+                                          \
+  V(TypedArrayPrototypeBuffer)            \
+                                          \
+  V(HandleApiCall)                        \
+  V(HandleApiCallAsFunction)              \
+  V(HandleApiCallAsConstructor)           \
+                                          \
+  V(RestrictedFunctionPropertiesThrower)  \
+  V(RestrictedStrictArgumentsPropertiesThrower)
 
 // Define list of builtins implemented in assembly.
-#define BUILTIN_LIST_A(V)                                                      \
-  V(AllocateInNewSpace, BUILTIN, UNINITIALIZED, kNoExtraICState)               \
-  V(AllocateInOldSpace, BUILTIN, UNINITIALIZED, kNoExtraICState)               \
-                                                                               \
-  V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
-                                                                               \
-  V(ConstructedNonConstructable, BUILTIN, UNINITIALIZED, kNoExtraICState)      \
-                                                                               \
-  V(CallFunction_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED,            \
-    kNoExtraICState)                                                           \
-  V(CallFunction_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED,         \
-    kNoExtraICState)                                                           \
-  V(CallFunction_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
-  V(TailCallFunction_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED,        \
-    kNoExtraICState)                                                           \
-  V(TailCallFunction_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED,     \
-    kNoExtraICState)                                                           \
-  V(TailCallFunction_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState)   \
-  V(CallBoundFunction, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
-  V(TailCallBoundFunction, BUILTIN, UNINITIALIZED, kNoExtraICState)            \
-  V(Call_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED, kNoExtraICState)   \
-  V(Call_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED,                 \
-    kNoExtraICState)                                                           \
-  V(Call_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState)               \
-  V(TailCall_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED,                \
-    kNoExtraICState)                                                           \
-  V(TailCall_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED,             \
-    kNoExtraICState)                                                           \
-  V(TailCall_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState)           \
-                                                                               \
-  V(ConstructFunction, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
-  V(ConstructBoundFunction, BUILTIN, UNINITIALIZED, kNoExtraICState)           \
-  V(ConstructProxy, BUILTIN, UNINITIALIZED, kNoExtraICState)                   \
-  V(Construct, BUILTIN, UNINITIALIZED, kNoExtraICState)                        \
-                                                                               \
-  V(Apply, BUILTIN, UNINITIALIZED, kNoExtraICState)                            \
-                                                                               \
-  V(HandleFastApiCall, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
-                                                                               \
-  V(InOptimizationQueue, BUILTIN, UNINITIALIZED, kNoExtraICState)              \
-  V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, kNoExtraICState)           \
-  V(JSBuiltinsConstructStub, BUILTIN, UNINITIALIZED, kNoExtraICState)          \
-  V(JSBuiltinsConstructStubForDerived, BUILTIN, UNINITIALIZED,                 \
-    kNoExtraICState)                                                           \
-  V(JSConstructStubApi, BUILTIN, UNINITIALIZED, kNoExtraICState)               \
-  V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
-  V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
-  V(ResumeGeneratorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState)        \
-  V(CompileLazy, BUILTIN, UNINITIALIZED, kNoExtraICState)                      \
-  V(CompileBaseline, BUILTIN, UNINITIALIZED, kNoExtraICState)                  \
-  V(CompileOptimized, BUILTIN, UNINITIALIZED, kNoExtraICState)                 \
-  V(CompileOptimizedConcurrent, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
-  V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
-  V(NotifySoftDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState)            \
-  V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState)            \
-  V(NotifyStubFailure, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
-  V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, kNoExtraICState)     \
-                                                                               \
-  V(InterpreterEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
-  V(InterpreterPushArgsAndCall, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
-  V(InterpreterPushArgsAndTailCall, BUILTIN, UNINITIALIZED, kNoExtraICState)   \
-  V(InterpreterPushArgsAndConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState)  \
-  V(InterpreterEnterBytecodeDispatch, BUILTIN, UNINITIALIZED, kNoExtraICState) \
-                                                                               \
-  V(LoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState)                      \
-  V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState)                 \
-  V(StoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState)                     \
-  V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
-  V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, kNoExtraICState)             \
-  V(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, MEGAMORPHIC, kNoExtraICState)      \
-                                                                               \
-  V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC,                            \
-    StoreICState::kStrictModeState)                                            \
-                                                                               \
-  V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, kNoExtraICState)               \
-  V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC,                         \
-    StoreICState::kStrictModeState)                                            \
-                                                                               \
-  V(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, MEGAMORPHIC, kNoExtraICState)    \
-  V(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC, MEGAMORPHIC,              \
-    StoreICState::kStrictModeState)                                            \
-                                                                               \
-  V(DatePrototypeGetDate, BUILTIN, UNINITIALIZED, kNoExtraICState)             \
-  V(DatePrototypeGetDay, BUILTIN, UNINITIALIZED, kNoExtraICState)              \
-  V(DatePrototypeGetFullYear, BUILTIN, UNINITIALIZED, kNoExtraICState)         \
-  V(DatePrototypeGetHours, BUILTIN, UNINITIALIZED, kNoExtraICState)            \
-  V(DatePrototypeGetMilliseconds, BUILTIN, UNINITIALIZED, kNoExtraICState)     \
-  V(DatePrototypeGetMinutes, BUILTIN, UNINITIALIZED, kNoExtraICState)          \
-  V(DatePrototypeGetMonth, BUILTIN, UNINITIALIZED, kNoExtraICState)            \
-  V(DatePrototypeGetSeconds, BUILTIN, UNINITIALIZED, kNoExtraICState)          \
-  V(DatePrototypeGetTime, BUILTIN, UNINITIALIZED, kNoExtraICState)             \
-  V(DatePrototypeGetTimezoneOffset, BUILTIN, UNINITIALIZED, kNoExtraICState)   \
-  V(DatePrototypeGetUTCDate, BUILTIN, UNINITIALIZED, kNoExtraICState)          \
-  V(DatePrototypeGetUTCDay, BUILTIN, UNINITIALIZED, kNoExtraICState)           \
-  V(DatePrototypeGetUTCFullYear, BUILTIN, UNINITIALIZED, kNoExtraICState)      \
-  V(DatePrototypeGetUTCHours, BUILTIN, UNINITIALIZED, kNoExtraICState)         \
-  V(DatePrototypeGetUTCMilliseconds, BUILTIN, UNINITIALIZED, kNoExtraICState)  \
-  V(DatePrototypeGetUTCMinutes, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
-  V(DatePrototypeGetUTCMonth, BUILTIN, UNINITIALIZED, kNoExtraICState)         \
-  V(DatePrototypeGetUTCSeconds, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
-                                                                               \
-  V(FunctionPrototypeApply, BUILTIN, UNINITIALIZED, kNoExtraICState)           \
-  V(FunctionPrototypeCall, BUILTIN, UNINITIALIZED, kNoExtraICState)            \
-                                                                               \
-  V(ReflectApply, BUILTIN, UNINITIALIZED, kNoExtraICState)                     \
-  V(ReflectConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState)                 \
-                                                                               \
-  V(InternalArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
-  V(ArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState)                        \
-                                                                               \
-  V(MathMax, BUILTIN, UNINITIALIZED, kNoExtraICState)                          \
-  V(MathMin, BUILTIN, UNINITIALIZED, kNoExtraICState)                          \
-                                                                               \
-  V(NumberConstructor, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
-  V(NumberConstructor_ConstructStub, BUILTIN, UNINITIALIZED, kNoExtraICState)  \
-                                                                               \
-  V(StringConstructor, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
-  V(StringConstructor_ConstructStub, BUILTIN, UNINITIALIZED, kNoExtraICState)  \
-                                                                               \
-  V(OnStackReplacement, BUILTIN, UNINITIALIZED, kNoExtraICState)               \
-  V(InterruptCheck, BUILTIN, UNINITIALIZED, kNoExtraICState)                   \
-  V(StackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState)                       \
-                                                                               \
-  V(MarkCodeAsToBeExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
-  V(MarkCodeAsExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState)           \
-  V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, kNoExtraICState)          \
+#define BUILTIN_LIST_A(V)                                                    \
+  V(AllocateInNewSpace, BUILTIN, kNoExtraICState)                            \
+  V(AllocateInOldSpace, BUILTIN, kNoExtraICState)                            \
+                                                                             \
+  V(ArgumentsAdaptorTrampoline, BUILTIN, kNoExtraICState)                    \
+                                                                             \
+  V(ConstructedNonConstructable, BUILTIN, kNoExtraICState)                   \
+                                                                             \
+  V(CallFunction_ReceiverIsNullOrUndefined, BUILTIN, kNoExtraICState)        \
+  V(CallFunction_ReceiverIsNotNullOrUndefined, BUILTIN, kNoExtraICState)     \
+  V(CallFunction_ReceiverIsAny, BUILTIN, kNoExtraICState)                    \
+  V(TailCallFunction_ReceiverIsNullOrUndefined, BUILTIN, kNoExtraICState)    \
+  V(TailCallFunction_ReceiverIsNotNullOrUndefined, BUILTIN, kNoExtraICState) \
+  V(TailCallFunction_ReceiverIsAny, BUILTIN, kNoExtraICState)                \
+  V(CallBoundFunction, BUILTIN, kNoExtraICState)                             \
+  V(TailCallBoundFunction, BUILTIN, kNoExtraICState)                         \
+  V(Call_ReceiverIsNullOrUndefined, BUILTIN, kNoExtraICState)                \
+  V(Call_ReceiverIsNotNullOrUndefined, BUILTIN, kNoExtraICState)             \
+  V(Call_ReceiverIsAny, BUILTIN, kNoExtraICState)                            \
+  V(TailCall_ReceiverIsNullOrUndefined, BUILTIN, kNoExtraICState)            \
+  V(TailCall_ReceiverIsNotNullOrUndefined, BUILTIN, kNoExtraICState)         \
+  V(TailCall_ReceiverIsAny, BUILTIN, kNoExtraICState)                        \
+                                                                             \
+  V(ConstructFunction, BUILTIN, kNoExtraICState)                             \
+  V(ConstructBoundFunction, BUILTIN, kNoExtraICState)                        \
+  V(ConstructProxy, BUILTIN, kNoExtraICState)                                \
+  V(Construct, BUILTIN, kNoExtraICState)                                     \
+                                                                             \
+  V(StringToNumber, BUILTIN, kNoExtraICState)                                \
+  V(NonNumberToNumber, BUILTIN, kNoExtraICState)                             \
+  V(ToNumber, BUILTIN, kNoExtraICState)                                      \
+                                                                             \
+  V(Apply, BUILTIN, kNoExtraICState)                                         \
+                                                                             \
+  V(HandleFastApiCall, BUILTIN, kNoExtraICState)                             \
+                                                                             \
+  V(InOptimizationQueue, BUILTIN, kNoExtraICState)                           \
+  V(JSConstructStubGeneric, BUILTIN, kNoExtraICState)                        \
+  V(JSBuiltinsConstructStub, BUILTIN, kNoExtraICState)                       \
+  V(JSBuiltinsConstructStubForDerived, BUILTIN, kNoExtraICState)             \
+  V(JSConstructStubApi, BUILTIN, kNoExtraICState)                            \
+  V(JSEntryTrampoline, BUILTIN, kNoExtraICState)                             \
+  V(JSConstructEntryTrampoline, BUILTIN, kNoExtraICState)                    \
+  V(ResumeGeneratorTrampoline, BUILTIN, kNoExtraICState)                     \
+  V(CompileLazy, BUILTIN, kNoExtraICState)                                   \
+  V(CompileBaseline, BUILTIN, kNoExtraICState)                               \
+  V(CompileOptimized, BUILTIN, kNoExtraICState)                              \
+  V(CompileOptimizedConcurrent, BUILTIN, kNoExtraICState)                    \
+  V(NotifyDeoptimized, BUILTIN, kNoExtraICState)                             \
+  V(NotifySoftDeoptimized, BUILTIN, kNoExtraICState)                         \
+  V(NotifyLazyDeoptimized, BUILTIN, kNoExtraICState)                         \
+  V(NotifyStubFailure, BUILTIN, kNoExtraICState)                             \
+  V(NotifyStubFailureSaveDoubles, BUILTIN, kNoExtraICState)                  \
+                                                                             \
+  V(InterpreterEntryTrampoline, BUILTIN, kNoExtraICState)                    \
+  V(InterpreterMarkBaselineOnReturn, BUILTIN, kNoExtraICState)               \
+  V(InterpreterPushArgsAndCall, BUILTIN, kNoExtraICState)                    \
+  V(InterpreterPushArgsAndTailCall, BUILTIN, kNoExtraICState)                \
+  V(InterpreterPushArgsAndConstruct, BUILTIN, kNoExtraICState)               \
+  V(InterpreterEnterBytecodeDispatch, BUILTIN, kNoExtraICState)              \
+                                                                             \
+  V(KeyedLoadIC_Miss, BUILTIN, kNoExtraICState)                              \
+  V(StoreIC_Miss, BUILTIN, kNoExtraICState)                                  \
+  V(KeyedStoreIC_Miss, BUILTIN, kNoExtraICState)                             \
+  V(LoadIC_Getter_ForDeopt, LOAD_IC, kNoExtraICState)                        \
+  V(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, kNoExtraICState)                 \
+                                                                             \
+  V(StoreIC_Setter_ForDeopt, STORE_IC, StoreICState::kStrictModeState)       \
+                                                                             \
+  V(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, kNoExtraICState)               \
+  V(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC,                         \
+    StoreICState::kStrictModeState)                                          \
+                                                                             \
+  V(DatePrototypeGetDate, BUILTIN, kNoExtraICState)                          \
+  V(DatePrototypeGetDay, BUILTIN, kNoExtraICState)                           \
+  V(DatePrototypeGetFullYear, BUILTIN, kNoExtraICState)                      \
+  V(DatePrototypeGetHours, BUILTIN, kNoExtraICState)                         \
+  V(DatePrototypeGetMilliseconds, BUILTIN, kNoExtraICState)                  \
+  V(DatePrototypeGetMinutes, BUILTIN, kNoExtraICState)                       \
+  V(DatePrototypeGetMonth, BUILTIN, kNoExtraICState)                         \
+  V(DatePrototypeGetSeconds, BUILTIN, kNoExtraICState)                       \
+  V(DatePrototypeGetTime, BUILTIN, kNoExtraICState)                          \
+  V(DatePrototypeGetTimezoneOffset, BUILTIN, kNoExtraICState)                \
+  V(DatePrototypeGetUTCDate, BUILTIN, kNoExtraICState)                       \
+  V(DatePrototypeGetUTCDay, BUILTIN, kNoExtraICState)                        \
+  V(DatePrototypeGetUTCFullYear, BUILTIN, kNoExtraICState)                   \
+  V(DatePrototypeGetUTCHours, BUILTIN, kNoExtraICState)                      \
+  V(DatePrototypeGetUTCMilliseconds, BUILTIN, kNoExtraICState)               \
+  V(DatePrototypeGetUTCMinutes, BUILTIN, kNoExtraICState)                    \
+  V(DatePrototypeGetUTCMonth, BUILTIN, kNoExtraICState)                      \
+  V(DatePrototypeGetUTCSeconds, BUILTIN, kNoExtraICState)                    \
+                                                                             \
+  V(FunctionPrototypeApply, BUILTIN, kNoExtraICState)                        \
+  V(FunctionPrototypeCall, BUILTIN, kNoExtraICState)                         \
+                                                                             \
+  V(ReflectApply, BUILTIN, kNoExtraICState)                                  \
+  V(ReflectConstruct, BUILTIN, kNoExtraICState)                              \
+                                                                             \
+  V(InternalArrayCode, BUILTIN, kNoExtraICState)                             \
+  V(ArrayCode, BUILTIN, kNoExtraICState)                                     \
+                                                                             \
+  V(MathMax, BUILTIN, kNoExtraICState)                                       \
+  V(MathMin, BUILTIN, kNoExtraICState)                                       \
+                                                                             \
+  V(NumberConstructor, BUILTIN, kNoExtraICState)                             \
+  V(NumberConstructor_ConstructStub, BUILTIN, kNoExtraICState)               \
+                                                                             \
+  V(StringConstructor, BUILTIN, kNoExtraICState)                             \
+  V(StringConstructor_ConstructStub, BUILTIN, kNoExtraICState)               \
+                                                                             \
+  V(OnStackReplacement, BUILTIN, kNoExtraICState)                            \
+  V(InterruptCheck, BUILTIN, kNoExtraICState)                                \
+  V(StackCheck, BUILTIN, kNoExtraICState)                                    \
+                                                                             \
+  V(MarkCodeAsToBeExecutedOnce, BUILTIN, kNoExtraICState)                    \
+  V(MarkCodeAsExecutedOnce, BUILTIN, kNoExtraICState)                        \
+  V(MarkCodeAsExecutedTwice, BUILTIN, kNoExtraICState)                       \
   CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
 
 // Define list of builtins implemented in TurboFan (with JS linkage).
-#define BUILTIN_LIST_T(V)            \
-  V(FunctionPrototypeHasInstance, 2) \
-  V(GeneratorPrototypeNext, 2)       \
-  V(GeneratorPrototypeReturn, 2)     \
-  V(GeneratorPrototypeThrow, 2)      \
-  V(MathCeil, 2)                     \
-  V(MathClz32, 2)                    \
-  V(MathFloor, 2)                    \
-  V(MathRound, 2)                    \
-  V(MathSqrt, 2)                     \
-  V(MathTrunc, 2)                    \
-  V(ObjectHasOwnProperty, 2)         \
-  V(ArrayIsArray, 2)                 \
-  V(StringPrototypeCharAt, 2)        \
-  V(StringPrototypeCharCodeAt, 2)    \
-  V(AtomicsLoad, 3)                  \
+#define BUILTIN_LIST_T(V)             \
+  V(FunctionPrototypeHasInstance, 2)  \
+  V(GeneratorPrototypeNext, 2)        \
+  V(GeneratorPrototypeReturn, 2)      \
+  V(GeneratorPrototypeThrow, 2)       \
+  V(MathAtan, 2)                      \
+  V(MathAtan2, 3)                     \
+  V(MathAtanh, 2)                     \
+  V(MathCeil, 2)                      \
+  V(MathCbrt, 2)                      \
+  V(MathExpm1, 2)                     \
+  V(MathClz32, 2)                     \
+  V(MathCos, 2)                       \
+  V(MathExp, 2)                       \
+  V(MathFloor, 2)                     \
+  V(MathLog, 2)                       \
+  V(MathLog1p, 2)                     \
+  V(MathLog2, 2)                      \
+  V(MathLog10, 2)                     \
+  V(MathRound, 2)                     \
+  V(MathSin, 2)                       \
+  V(MathTan, 2)                       \
+  V(MathSqrt, 2)                      \
+  V(MathTrunc, 2)                     \
+  V(ObjectHasOwnProperty, 2)          \
+  V(ArrayIsArray, 2)                  \
+  V(StringFromCharCode, 2)            \
+  V(StringPrototypeCharAt, 2)         \
+  V(StringPrototypeCharCodeAt, 2)     \
+  V(TypedArrayPrototypeByteLength, 1) \
+  V(TypedArrayPrototypeByteOffset, 1) \
+  V(TypedArrayPrototypeLength, 1)     \
+  V(AtomicsLoad, 3)                   \
   V(AtomicsStore, 4)
 
+// Define list of builtins implemented in TurboFan (with CallStub linkage).
+#define BUILTIN_LIST_S(V)                                                   \
+  V(LoadGlobalIC_Miss, BUILTIN, kNoExtraICState, LoadGlobalWithVector)      \
+  V(LoadGlobalIC_Slow, HANDLER, Code::LOAD_GLOBAL_IC, LoadGlobalWithVector) \
+  V(LoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector)                  \
+  V(LoadIC_Slow, HANDLER, Code::LOAD_IC, LoadWithVector)
+
 // Define list of builtin handlers implemented in assembly.
 #define BUILTIN_LIST_H(V)                    \
-  V(LoadIC_Slow,             LOAD_IC)        \
   V(KeyedLoadIC_Slow,        KEYED_LOAD_IC)  \
   V(StoreIC_Slow,            STORE_IC)       \
   V(KeyedStoreIC_Slow,       KEYED_STORE_IC) \
@@ -337,11 +352,10 @@
   V(StoreIC_Normal,          STORE_IC)
 
 // Define list of builtins used by the debugger implemented in assembly.
-#define BUILTIN_LIST_DEBUG_A(V)                                 \
-  V(Return_DebugBreak, BUILTIN, DEBUG_STUB, kNoExtraICState)    \
-  V(Slot_DebugBreak, BUILTIN, DEBUG_STUB, kNoExtraICState)      \
-  V(FrameDropper_LiveEdit, BUILTIN, DEBUG_STUB, kNoExtraICState)
-
+#define BUILTIN_LIST_DEBUG_A(V)                  \
+  V(Return_DebugBreak, BUILTIN, kNoExtraICState) \
+  V(Slot_DebugBreak, BUILTIN, kNoExtraICState)   \
+  V(FrameDropper_LiveEdit, BUILTIN, kNoExtraICState)
 
 class BuiltinFunctionTable;
 class ObjectVisitor;
@@ -363,40 +377,45 @@
   const char* Lookup(byte* pc);
 
   enum Name {
-#define DEF_ENUM_C(name, ignore) k##name,
-#define DEF_ENUM_A(name, kind, state, extra) k##name,
+#define DEF_ENUM_C(name) k##name,
+#define DEF_ENUM_A(name, kind, extra) k##name,
 #define DEF_ENUM_T(name, argc) k##name,
+#define DEF_ENUM_S(name, kind, extra, interface_descriptor) k##name,
 #define DEF_ENUM_H(name, kind) k##name,
     BUILTIN_LIST_C(DEF_ENUM_C) BUILTIN_LIST_A(DEF_ENUM_A)
-        BUILTIN_LIST_T(DEF_ENUM_T) BUILTIN_LIST_H(DEF_ENUM_H)
-            BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
+        BUILTIN_LIST_T(DEF_ENUM_T) BUILTIN_LIST_S(DEF_ENUM_S)
+            BUILTIN_LIST_H(DEF_ENUM_H) BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
 #undef DEF_ENUM_C
 #undef DEF_ENUM_A
 #undef DEF_ENUM_T
+#undef DEF_ENUM_S
 #undef DEF_ENUM_H
                 builtin_count
   };
 
   enum CFunctionId {
-#define DEF_ENUM_C(name, ignore) c_##name,
+#define DEF_ENUM_C(name) c_##name,
     BUILTIN_LIST_C(DEF_ENUM_C)
 #undef DEF_ENUM_C
     cfunction_count
   };
 
-#define DECLARE_BUILTIN_ACCESSOR_C(name, ignore) Handle<Code> name();
-#define DECLARE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
-  Handle<Code> name();
+#define DECLARE_BUILTIN_ACCESSOR_C(name) Handle<Code> name();
+#define DECLARE_BUILTIN_ACCESSOR_A(name, kind, extra) Handle<Code> name();
 #define DECLARE_BUILTIN_ACCESSOR_T(name, argc) Handle<Code> name();
+#define DECLARE_BUILTIN_ACCESSOR_S(name, kind, extra, interface_descriptor) \
+  Handle<Code> name();
 #define DECLARE_BUILTIN_ACCESSOR_H(name, kind) Handle<Code> name();
   BUILTIN_LIST_C(DECLARE_BUILTIN_ACCESSOR_C)
   BUILTIN_LIST_A(DECLARE_BUILTIN_ACCESSOR_A)
   BUILTIN_LIST_T(DECLARE_BUILTIN_ACCESSOR_T)
+  BUILTIN_LIST_S(DECLARE_BUILTIN_ACCESSOR_S)
   BUILTIN_LIST_H(DECLARE_BUILTIN_ACCESSOR_H)
   BUILTIN_LIST_DEBUG_A(DECLARE_BUILTIN_ACCESSOR_A)
 #undef DECLARE_BUILTIN_ACCESSOR_C
 #undef DECLARE_BUILTIN_ACCESSOR_A
 #undef DECLARE_BUILTIN_ACCESSOR_T
+#undef DECLARE_BUILTIN_ACCESSOR_S
 #undef DECLARE_BUILTIN_ACCESSOR_H
 
   // Convenience wrappers.
@@ -431,8 +450,8 @@
   bool is_initialized() const { return initialized_; }
 
   MUST_USE_RESULT static MaybeHandle<Object> InvokeApiFunction(
-      Handle<HeapObject> function, Handle<Object> receiver, int argc,
-      Handle<Object> args[]);
+      Isolate* isolate, Handle<HeapObject> function, Handle<Object> receiver,
+      int argc, Handle<Object> args[]);
 
  private:
   Builtins();
@@ -446,9 +465,7 @@
   Object* builtins_[builtin_count];
   const char* names_[builtin_count];
 
-  static void Generate_Adaptor(MacroAssembler* masm,
-                               CFunctionId id,
-                               BuiltinExtraArguments extra_args);
+  static void Generate_Adaptor(MacroAssembler* masm, CFunctionId id);
   static void Generate_AllocateInNewSpace(MacroAssembler* masm);
   static void Generate_AllocateInOldSpace(MacroAssembler* masm);
   static void Generate_ConstructedNonConstructable(MacroAssembler* masm);
@@ -470,6 +487,9 @@
   static void Generate_NotifyStubFailure(MacroAssembler* masm);
   static void Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm);
   static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
+  static void Generate_StringToNumber(MacroAssembler* masm);
+  static void Generate_NonNumberToNumber(MacroAssembler* masm);
+  static void Generate_ToNumber(MacroAssembler* masm);
 
   static void Generate_Apply(MacroAssembler* masm);
 
@@ -601,12 +621,34 @@
   static void Generate_InternalArrayCode(MacroAssembler* masm);
   static void Generate_ArrayCode(MacroAssembler* masm);
 
+  // ES6 section 20.2.2.6 Math.atan ( x )
+  static void Generate_MathAtan(CodeStubAssembler* assembler);
+  // ES6 section 20.2.2.8 Math.atan2 ( y, x )
+  static void Generate_MathAtan2(CodeStubAssembler* assembler);
+  // ES6 section 20.2.2.7 Math.atanh ( x )
+  static void Generate_MathAtanh(CodeStubAssembler* assembler);
   // ES6 section 20.2.2.10 Math.ceil ( x )
   static void Generate_MathCeil(CodeStubAssembler* assembler);
+  // ES6 section 20.2.2.9 Math.ceil ( x )
+  static void Generate_MathCbrt(CodeStubAssembler* assembler);
+  // ES6 section 20.2.2.15 Math.expm1 ( x )
+  static void Generate_MathExpm1(CodeStubAssembler* assembler);
   // ES6 section 20.2.2.11 Math.clz32 ( x )
   static void Generate_MathClz32(CodeStubAssembler* assembler);
+  // ES6 section 20.2.2.12 Math.cos ( x )
+  static void Generate_MathCos(CodeStubAssembler* assembler);
+  // ES6 section 20.2.2.14 Math.exp ( x )
+  static void Generate_MathExp(CodeStubAssembler* assembler);
   // ES6 section 20.2.2.16 Math.floor ( x )
   static void Generate_MathFloor(CodeStubAssembler* assembler);
+  // ES6 section 20.2.2.20 Math.log ( x )
+  static void Generate_MathLog(CodeStubAssembler* assembler);
+  // ES6 section 20.2.2.21 Math.log ( x )
+  static void Generate_MathLog1p(CodeStubAssembler* assembler);
+
+  static void Generate_MathLog2(CodeStubAssembler* assembler);
+  static void Generate_MathLog10(CodeStubAssembler* assembler);
+
   enum class MathMaxMinKind { kMax, kMin };
   static void Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind);
   // ES6 section 20.2.2.24 Math.max ( value1, value2 , ...values )
@@ -619,8 +661,12 @@
   }
   // ES6 section 20.2.2.28 Math.round ( x )
   static void Generate_MathRound(CodeStubAssembler* assembler);
+  // ES6 section 20.2.2.20 Math.sin ( x )
+  static void Generate_MathSin(CodeStubAssembler* assembler);
   // ES6 section 20.2.2.32 Math.sqrt ( x )
   static void Generate_MathSqrt(CodeStubAssembler* assembler);
+  // ES6 section 20.2.2.33 Math.sin ( x )
+  static void Generate_MathTan(CodeStubAssembler* assembler);
   // ES6 section 20.2.2.35 Math.trunc ( x )
   static void Generate_MathTrunc(CodeStubAssembler* assembler);
 
@@ -646,6 +692,8 @@
   // ES6 section 22.1.2.2 Array.isArray
   static void Generate_ArrayIsArray(CodeStubAssembler* assembler);
 
+  // ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
+  static void Generate_StringFromCharCode(CodeStubAssembler* assembler);
   // ES6 section 21.1.3.1 String.prototype.charAt ( pos )
   static void Generate_StringPrototypeCharAt(CodeStubAssembler* assembler);
   // ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
@@ -653,12 +701,23 @@
 
   static void Generate_StringConstructor(MacroAssembler* masm);
   static void Generate_StringConstructor_ConstructStub(MacroAssembler* masm);
+
+  // ES6 section 22.2.3.2 get %TypedArray%.prototype.byteLength
+  static void Generate_TypedArrayPrototypeByteLength(
+      CodeStubAssembler* assembler);
+  // ES6 section 22.2.3.3 get %TypedArray%.prototype.byteOffset
+  static void Generate_TypedArrayPrototypeByteOffset(
+      CodeStubAssembler* assembler);
+  // ES6 section 22.2.3.18 get %TypedArray%.prototype.length
+  static void Generate_TypedArrayPrototypeLength(CodeStubAssembler* assembler);
+
   static void Generate_OnStackReplacement(MacroAssembler* masm);
   static void Generate_InterruptCheck(MacroAssembler* masm);
   static void Generate_StackCheck(MacroAssembler* masm);
 
   static void Generate_InterpreterEntryTrampoline(MacroAssembler* masm);
   static void Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm);
+  static void Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm);
   static void Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
     return Generate_InterpreterPushArgsAndCallImpl(masm,
                                                    TailCallMode::kDisallow);
diff --git a/src/code-events.h b/src/code-events.h
new file mode 100644
index 0000000..9ae1cae
--- /dev/null
+++ b/src/code-events.h
@@ -0,0 +1,183 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODE_EVENTS_H_
+#define V8_CODE_EVENTS_H_
+
+#include <unordered_set>
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class AbstractCode;
+class Name;
+class SharedFunctionInfo;
+class String;
+
+#define LOG_EVENTS_AND_TAGS_LIST(V)                                      \
+  V(CODE_CREATION_EVENT, "code-creation")                                \
+  V(CODE_DISABLE_OPT_EVENT, "code-disable-optimization")                 \
+  V(CODE_MOVE_EVENT, "code-move")                                        \
+  V(CODE_DELETE_EVENT, "code-delete")                                    \
+  V(CODE_MOVING_GC, "code-moving-gc")                                    \
+  V(SHARED_FUNC_MOVE_EVENT, "sfi-move")                                  \
+  V(SNAPSHOT_CODE_NAME_EVENT, "snapshot-code-name")                      \
+  V(TICK_EVENT, "tick")                                                  \
+  V(REPEAT_META_EVENT, "repeat")                                         \
+  V(BUILTIN_TAG, "Builtin")                                              \
+  V(CALL_DEBUG_BREAK_TAG, "CallDebugBreak")                              \
+  V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn")            \
+  V(CALL_INITIALIZE_TAG, "CallInitialize")                               \
+  V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic")                             \
+  V(CALL_MISS_TAG, "CallMiss")                                           \
+  V(CALL_NORMAL_TAG, "CallNormal")                                       \
+  V(LOAD_INITIALIZE_TAG, "LoadInitialize")                               \
+  V(LOAD_MEGAMORPHIC_TAG, "LoadMegamorphic")                             \
+  V(STORE_INITIALIZE_TAG, "StoreInitialize")                             \
+  V(STORE_GENERIC_TAG, "StoreGeneric")                                   \
+  V(STORE_MEGAMORPHIC_TAG, "StoreMegamorphic")                           \
+  V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak")                   \
+  V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG, "KeyedCallDebugPrepareStepIn") \
+  V(KEYED_CALL_INITIALIZE_TAG, "KeyedCallInitialize")                    \
+  V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic")                  \
+  V(KEYED_CALL_MISS_TAG, "KeyedCallMiss")                                \
+  V(KEYED_CALL_NORMAL_TAG, "KeyedCallNormal")                            \
+  V(CALLBACK_TAG, "Callback")                                            \
+  V(EVAL_TAG, "Eval")                                                    \
+  V(FUNCTION_TAG, "Function")                                            \
+  V(HANDLER_TAG, "Handler")                                              \
+  V(BYTECODE_HANDLER_TAG, "BytecodeHandler")                             \
+  V(KEYED_LOAD_IC_TAG, "KeyedLoadIC")                                    \
+  V(KEYED_LOAD_POLYMORPHIC_IC_TAG, "KeyedLoadPolymorphicIC")             \
+  V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC")        \
+  V(KEYED_STORE_IC_TAG, "KeyedStoreIC")                                  \
+  V(KEYED_STORE_POLYMORPHIC_IC_TAG, "KeyedStorePolymorphicIC")           \
+  V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC")      \
+  V(LAZY_COMPILE_TAG, "LazyCompile")                                     \
+  V(CALL_IC_TAG, "CallIC")                                               \
+  V(LOAD_IC_TAG, "LoadIC")                                               \
+  V(LOAD_GLOBAL_IC_TAG, "LoadGlobalIC")                                  \
+  V(LOAD_POLYMORPHIC_IC_TAG, "LoadPolymorphicIC")                        \
+  V(REG_EXP_TAG, "RegExp")                                               \
+  V(SCRIPT_TAG, "Script")                                                \
+  V(STORE_IC_TAG, "StoreIC")                                             \
+  V(STORE_POLYMORPHIC_IC_TAG, "StorePolymorphicIC")                      \
+  V(STUB_TAG, "Stub")                                                    \
+  V(NATIVE_FUNCTION_TAG, "Function")                                     \
+  V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile")                              \
+  V(NATIVE_SCRIPT_TAG, "Script")
+// Note that 'NATIVE_' cases for functions and scripts are mapped onto
+// original tags when writing to the log.
+
+#define PROFILE(the_isolate, Call) (the_isolate)->code_event_dispatcher()->Call;
+
+class CodeEventListener {
+ public:
+#define DECLARE_ENUM(enum_item, _) enum_item,
+  enum LogEventsAndTags {
+    LOG_EVENTS_AND_TAGS_LIST(DECLARE_ENUM) NUMBER_OF_LOG_EVENTS
+  };
+#undef DECLARE_ENUM
+
+  virtual ~CodeEventListener() {}
+
+  virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+                               const char* comment) = 0;
+  virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+                               Name* name) = 0;
+  virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+                               SharedFunctionInfo* shared, Name* name) = 0;
+  virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+                               SharedFunctionInfo* shared, Name* source,
+                               int line, int column) = 0;
+  virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+                               int args_count) = 0;
+  virtual void CallbackEvent(Name* name, Address entry_point) = 0;
+  virtual void GetterCallbackEvent(Name* name, Address entry_point) = 0;
+  virtual void SetterCallbackEvent(Name* name, Address entry_point) = 0;
+  virtual void RegExpCodeCreateEvent(AbstractCode* code, String* source) = 0;
+  virtual void CodeMoveEvent(AbstractCode* from, Address to) = 0;
+  virtual void SharedFunctionInfoMoveEvent(Address from, Address to) = 0;
+  virtual void CodeMovingGCEvent() = 0;
+  virtual void CodeDisableOptEvent(AbstractCode* code,
+                                   SharedFunctionInfo* shared) = 0;
+  virtual void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta) = 0;
+};
+
+class CodeEventDispatcher {
+ public:
+  using LogEventsAndTags = CodeEventListener::LogEventsAndTags;
+
+  CodeEventDispatcher() {}
+
+  bool AddListener(CodeEventListener* listener) {
+    return listeners_.insert(listener).second;
+  }
+  void RemoveListener(CodeEventListener* listener) {
+    listeners_.erase(listener);
+  }
+
+#define CODE_EVENT_DISPATCH(code) \
+  for (auto it = listeners_.begin(); it != listeners_.end(); ++it) (*it)->code
+
+  void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+                       const char* comment) {
+    CODE_EVENT_DISPATCH(CodeCreateEvent(tag, code, comment));
+  }
+  void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code, Name* name) {
+    CODE_EVENT_DISPATCH(CodeCreateEvent(tag, code, name));
+  }
+  void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+                       SharedFunctionInfo* shared, Name* name) {
+    CODE_EVENT_DISPATCH(CodeCreateEvent(tag, code, shared, name));
+  }
+  void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+                       SharedFunctionInfo* shared, Name* source, int line,
+                       int column) {
+    CODE_EVENT_DISPATCH(
+        CodeCreateEvent(tag, code, shared, source, line, column));
+  }
+  void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+                       int args_count) {
+    CODE_EVENT_DISPATCH(CodeCreateEvent(tag, code, args_count));
+  }
+  void CallbackEvent(Name* name, Address entry_point) {
+    CODE_EVENT_DISPATCH(CallbackEvent(name, entry_point));
+  }
+  void GetterCallbackEvent(Name* name, Address entry_point) {
+    CODE_EVENT_DISPATCH(GetterCallbackEvent(name, entry_point));
+  }
+  void SetterCallbackEvent(Name* name, Address entry_point) {
+    CODE_EVENT_DISPATCH(SetterCallbackEvent(name, entry_point));
+  }
+  void RegExpCodeCreateEvent(AbstractCode* code, String* source) {
+    CODE_EVENT_DISPATCH(RegExpCodeCreateEvent(code, source));
+  }
+  void CodeMoveEvent(AbstractCode* from, Address to) {
+    CODE_EVENT_DISPATCH(CodeMoveEvent(from, to));
+  }
+  void SharedFunctionInfoMoveEvent(Address from, Address to) {
+    CODE_EVENT_DISPATCH(SharedFunctionInfoMoveEvent(from, to));
+  }
+  void CodeMovingGCEvent() { CODE_EVENT_DISPATCH(CodeMovingGCEvent()); }
+  void CodeDisableOptEvent(AbstractCode* code, SharedFunctionInfo* shared) {
+    CODE_EVENT_DISPATCH(CodeDisableOptEvent(code, shared));
+  }
+  void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta) {
+    CODE_EVENT_DISPATCH(CodeDeoptEvent(code, pc, fp_to_sp_delta));
+  }
+#undef CODE_EVENT_DISPATCH
+
+ private:
+  std::unordered_set<CodeEventListener*> listeners_;
+
+  DISALLOW_COPY_AND_ASSIGN(CodeEventDispatcher);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CODE_EVENTS_H_
diff --git a/src/code-factory.cc b/src/code-factory.cc
index dd12b05..944f780 100644
--- a/src/code-factory.cc
+++ b/src/code-factory.cc
@@ -12,8 +12,12 @@
 
 
 // static
-Callable CodeFactory::LoadIC(Isolate* isolate, TypeofMode typeof_mode) {
-  LoadICTrampolineStub stub(isolate, LoadICState(typeof_mode));
+Callable CodeFactory::LoadIC(Isolate* isolate) {
+  if (FLAG_tf_load_ic_stub) {
+    LoadICTrampolineTFStub stub(isolate);
+    return Callable(stub.GetCode(), LoadDescriptor(isolate));
+  }
+  LoadICTrampolineStub stub(isolate);
   return Callable(stub.GetCode(), LoadDescriptor(isolate));
 }
 
@@ -24,32 +28,37 @@
 }
 
 // static
-Callable CodeFactory::LoadICInOptimizedCode(
-    Isolate* isolate, TypeofMode typeof_mode,
-    InlineCacheState initialization_state) {
-  auto code = LoadIC::initialize_stub_in_optimized_code(
-      isolate, LoadICState(typeof_mode).GetExtraICState(),
-      initialization_state);
+Callable CodeFactory::LoadICInOptimizedCode(Isolate* isolate) {
+  auto code = LoadIC::initialize_stub_in_optimized_code(isolate);
   return Callable(code, LoadWithVectorDescriptor(isolate));
 }
 
+// static
+Callable CodeFactory::LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode) {
+  LoadGlobalICTrampolineStub stub(isolate, LoadGlobalICState(typeof_mode));
+  return Callable(stub.GetCode(), LoadGlobalDescriptor(isolate));
+}
+
+// static
+Callable CodeFactory::LoadGlobalICInOptimizedCode(Isolate* isolate,
+                                                  TypeofMode typeof_mode) {
+  auto code = LoadGlobalIC::initialize_stub_in_optimized_code(
+      isolate, LoadGlobalICState(typeof_mode).GetExtraICState());
+  return Callable(code, LoadGlobalWithVectorDescriptor(isolate));
+}
 
 // static
 Callable CodeFactory::KeyedLoadIC(Isolate* isolate) {
-  KeyedLoadICTrampolineStub stub(isolate, LoadICState(kNoExtraICState));
+  KeyedLoadICTrampolineStub stub(isolate);
   return Callable(stub.GetCode(), LoadDescriptor(isolate));
 }
 
 
 // static
-Callable CodeFactory::KeyedLoadICInOptimizedCode(
-    Isolate* isolate, InlineCacheState initialization_state) {
-  auto code = KeyedLoadIC::initialize_stub_in_optimized_code(
-      isolate, initialization_state, kNoExtraICState);
-  if (initialization_state != MEGAMORPHIC) {
-    return Callable(code, LoadWithVectorDescriptor(isolate));
-  }
-  return Callable(code, LoadDescriptor(isolate));
+Callable CodeFactory::KeyedLoadICInOptimizedCode(Isolate* isolate) {
+  auto code =
+      KeyedLoadIC::initialize_stub_in_optimized_code(isolate, kNoExtraICState);
+  return Callable(code, LoadWithVectorDescriptor(isolate));
 }
 
 
@@ -80,15 +89,12 @@
 
 
 // static
-Callable CodeFactory::StoreICInOptimizedCode(
-    Isolate* isolate, LanguageMode language_mode,
-    InlineCacheState initialization_state) {
-  CallInterfaceDescriptor descriptor = initialization_state != MEGAMORPHIC
-                                           ? VectorStoreICDescriptor(isolate)
-                                           : StoreDescriptor(isolate);
-  return Callable(StoreIC::initialize_stub_in_optimized_code(
-                      isolate, language_mode, initialization_state),
-                  descriptor);
+Callable CodeFactory::StoreICInOptimizedCode(Isolate* isolate,
+                                             LanguageMode language_mode) {
+  CallInterfaceDescriptor descriptor = VectorStoreICDescriptor(isolate);
+  return Callable(
+      StoreIC::initialize_stub_in_optimized_code(isolate, language_mode),
+      descriptor);
 }
 
 
@@ -101,15 +107,12 @@
 
 
 // static
-Callable CodeFactory::KeyedStoreICInOptimizedCode(
-    Isolate* isolate, LanguageMode language_mode,
-    InlineCacheState initialization_state) {
-  CallInterfaceDescriptor descriptor = initialization_state != MEGAMORPHIC
-                                           ? VectorStoreICDescriptor(isolate)
-                                           : StoreDescriptor(isolate);
-  return Callable(KeyedStoreIC::initialize_stub_in_optimized_code(
-                      isolate, language_mode, initialization_state),
-                  descriptor);
+Callable CodeFactory::KeyedStoreICInOptimizedCode(Isolate* isolate,
+                                                  LanguageMode language_mode) {
+  CallInterfaceDescriptor descriptor = VectorStoreICDescriptor(isolate);
+  return Callable(
+      KeyedStoreIC::initialize_stub_in_optimized_code(isolate, language_mode),
+      descriptor);
 }
 
 
@@ -143,21 +146,21 @@
 
 // static
 Callable CodeFactory::ToNumber(Isolate* isolate) {
-  ToNumberStub stub(isolate);
-  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+  return Callable(isolate->builtins()->ToNumber(),
+                  TypeConversionDescriptor(isolate));
 }
 
 
 // static
 Callable CodeFactory::NonNumberToNumber(Isolate* isolate) {
-  NonNumberToNumberStub stub(isolate);
-  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+  return Callable(isolate->builtins()->NonNumberToNumber(),
+                  TypeConversionDescriptor(isolate));
 }
 
 // static
 Callable CodeFactory::StringToNumber(Isolate* isolate) {
-  StringToNumberStub stub(isolate);
-  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+  return Callable(isolate->builtins()->StringToNumber(),
+                  TypeConversionDescriptor(isolate));
 }
 
 // static
@@ -552,6 +555,12 @@
 }
 
 // static
+Callable CodeFactory::MathPow(Isolate* isolate) {
+  MathPowStub stub(isolate, MathPowStub::ON_STACK);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
 Callable CodeFactory::InterpreterPushArgsAndCall(Isolate* isolate,
                                                  TailCallMode tail_call_mode) {
   return Callable(
diff --git a/src/code-factory.h b/src/code-factory.h
index 7b43cae..7fb7bc5 100644
--- a/src/code-factory.h
+++ b/src/code-factory.h
@@ -32,13 +32,13 @@
 class CodeFactory final {
  public:
   // Initial states for ICs.
-  static Callable LoadIC(Isolate* isolate, TypeofMode typeof_mode);
-  static Callable LoadICInOptimizedCode(Isolate* isolate,
-                                        TypeofMode typeof_mode,
-                                        InlineCacheState initialization_state);
+  static Callable LoadIC(Isolate* isolate);
+  static Callable LoadICInOptimizedCode(Isolate* isolate);
+  static Callable LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode);
+  static Callable LoadGlobalICInOptimizedCode(Isolate* isolate,
+                                              TypeofMode typeof_mode);
   static Callable KeyedLoadIC(Isolate* isolate);
-  static Callable KeyedLoadICInOptimizedCode(
-      Isolate* isolate, InlineCacheState initialization_state);
+  static Callable KeyedLoadICInOptimizedCode(Isolate* isolate);
   static Callable CallIC(Isolate* isolate, int argc,
                          ConvertReceiverMode mode = ConvertReceiverMode::kAny,
                          TailCallMode tail_call_mode = TailCallMode::kDisallow);
@@ -47,12 +47,10 @@
       ConvertReceiverMode mode = ConvertReceiverMode::kAny,
       TailCallMode tail_call_mode = TailCallMode::kDisallow);
   static Callable StoreIC(Isolate* isolate, LanguageMode mode);
-  static Callable StoreICInOptimizedCode(Isolate* isolate, LanguageMode mode,
-                                         InlineCacheState initialization_state);
+  static Callable StoreICInOptimizedCode(Isolate* isolate, LanguageMode mode);
   static Callable KeyedStoreIC(Isolate* isolate, LanguageMode mode);
-  static Callable KeyedStoreICInOptimizedCode(
-      Isolate* isolate, LanguageMode mode,
-      InlineCacheState initialization_state);
+  static Callable KeyedStoreICInOptimizedCode(Isolate* isolate,
+                                              LanguageMode mode);
 
   static Callable ResumeGenerator(Isolate* isolate);
 
@@ -148,6 +146,8 @@
   static Callable ConstructFunction(Isolate* isolate);
   static Callable HasProperty(Isolate* isolate);
 
+  static Callable MathPow(Isolate* isolate);
+
   static Callable InterpreterPushArgsAndCall(Isolate* isolate,
                                              TailCallMode tail_call_mode);
   static Callable InterpreterPushArgsAndConstruct(Isolate* isolate);
diff --git a/src/code-stub-assembler.cc b/src/code-stub-assembler.cc
index 3e26b52..dca2167 100644
--- a/src/code-stub-assembler.cc
+++ b/src/code-stub-assembler.cc
@@ -4,6 +4,9 @@
 
 #include "src/code-stub-assembler.h"
 #include "src/code-factory.h"
+#include "src/frames-inl.h"
+#include "src/frames.h"
+#include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
@@ -22,6 +25,18 @@
                                      const char* name)
     : compiler::CodeAssembler(isolate, zone, parameter_count, flags, name) {}
 
+void CodeStubAssembler::Assert(Node* condition) {
+#if defined(DEBUG)
+  Label ok(this);
+  Comment("[ Assert");
+  GotoIf(condition, &ok);
+  DebugBreak();
+  Goto(&ok);
+  Bind(&ok);
+  Comment("] Assert");
+#endif
+}
+
 Node* CodeStubAssembler::BooleanMapConstant() {
   return HeapConstant(isolate()->factory()->boolean_map());
 }
@@ -46,6 +61,14 @@
   return LoadRoot(Heap::kUndefinedValueRootIndex);
 }
 
+Node* CodeStubAssembler::TheHoleConstant() {
+  return LoadRoot(Heap::kTheHoleValueRootIndex);
+}
+
+Node* CodeStubAssembler::HashSeed() {
+  return SmiToWord32(LoadRoot(Heap::kHashSeedRootIndex));
+}
+
 Node* CodeStubAssembler::StaleRegisterConstant() {
   return LoadRoot(Heap::kStaleRegisterRootIndex);
 }
@@ -450,6 +473,17 @@
   return InnerAllocate(previous, IntPtrConstant(offset));
 }
 
+compiler::Node* CodeStubAssembler::LoadFromFrame(int offset, MachineType rep) {
+  Node* frame_pointer = LoadFramePointer();
+  return Load(rep, frame_pointer, IntPtrConstant(offset));
+}
+
+compiler::Node* CodeStubAssembler::LoadFromParentFrame(int offset,
+                                                       MachineType rep) {
+  Node* frame_pointer = LoadParentFramePointer();
+  return Load(rep, frame_pointer, IntPtrConstant(offset));
+}
+
 Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
                                           MachineType rep) {
   return Load(rep, buffer, IntPtrConstant(offset));
@@ -460,9 +494,14 @@
   return Load(rep, object, IntPtrConstant(offset - kHeapObjectTag));
 }
 
+Node* CodeStubAssembler::LoadObjectField(Node* object, Node* offset,
+                                         MachineType rep) {
+  return Load(rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
+}
+
 Node* CodeStubAssembler::LoadHeapNumberValue(Node* object) {
-  return Load(MachineType::Float64(), object,
-              IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag));
+  return LoadObjectField(object, HeapNumber::kValueOffset,
+                         MachineType::Float64());
 }
 
 Node* CodeStubAssembler::LoadMap(Node* object) {
@@ -473,6 +512,15 @@
   return LoadMapInstanceType(LoadMap(object));
 }
 
+void CodeStubAssembler::AssertInstanceType(Node* object,
+                                           InstanceType instance_type) {
+  Assert(Word32Equal(LoadInstanceType(object), Int32Constant(instance_type)));
+}
+
+Node* CodeStubAssembler::LoadProperties(Node* object) {
+  return LoadObjectField(object, JSObject::kPropertiesOffset);
+}
+
 Node* CodeStubAssembler::LoadElements(Node* object) {
   return LoadObjectField(object, JSObject::kElementsOffset);
 }
@@ -482,23 +530,19 @@
 }
 
 Node* CodeStubAssembler::LoadMapBitField(Node* map) {
-  return Load(MachineType::Uint8(), map,
-              IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag));
+  return LoadObjectField(map, Map::kBitFieldOffset, MachineType::Uint8());
 }
 
 Node* CodeStubAssembler::LoadMapBitField2(Node* map) {
-  return Load(MachineType::Uint8(), map,
-              IntPtrConstant(Map::kBitField2Offset - kHeapObjectTag));
+  return LoadObjectField(map, Map::kBitField2Offset, MachineType::Uint8());
 }
 
 Node* CodeStubAssembler::LoadMapBitField3(Node* map) {
-  return Load(MachineType::Uint32(), map,
-              IntPtrConstant(Map::kBitField3Offset - kHeapObjectTag));
+  return LoadObjectField(map, Map::kBitField3Offset, MachineType::Uint32());
 }
 
 Node* CodeStubAssembler::LoadMapInstanceType(Node* map) {
-  return Load(MachineType::Uint8(), map,
-              IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag));
+  return LoadObjectField(map, Map::kInstanceTypeOffset, MachineType::Uint8());
 }
 
 Node* CodeStubAssembler::LoadMapDescriptors(Node* map) {
@@ -509,9 +553,49 @@
   return LoadObjectField(map, Map::kPrototypeOffset);
 }
 
-Node* CodeStubAssembler::LoadNameHash(Node* name) {
-  return Load(MachineType::Uint32(), name,
-              IntPtrConstant(Name::kHashFieldOffset - kHeapObjectTag));
+Node* CodeStubAssembler::LoadMapInstanceSize(Node* map) {
+  return LoadObjectField(map, Map::kInstanceSizeOffset, MachineType::Uint8());
+}
+
+Node* CodeStubAssembler::LoadMapInobjectProperties(Node* map) {
+  // See Map::GetInObjectProperties() for details.
+  STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+  Assert(Int32GreaterThanOrEqual(LoadMapInstanceType(map),
+                                 Int32Constant(FIRST_JS_OBJECT_TYPE)));
+  return LoadObjectField(
+      map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
+      MachineType::Uint8());
+}
+
+Node* CodeStubAssembler::LoadNameHashField(Node* name) {
+  return LoadObjectField(name, Name::kHashFieldOffset, MachineType::Uint32());
+}
+
+Node* CodeStubAssembler::LoadNameHash(Node* name, Label* if_hash_not_computed) {
+  Node* hash_field = LoadNameHashField(name);
+  if (if_hash_not_computed != nullptr) {
+    GotoIf(WordEqual(
+               Word32And(hash_field, Int32Constant(Name::kHashNotComputedMask)),
+               Int32Constant(0)),
+           if_hash_not_computed);
+  }
+  return Word32Shr(hash_field, Int32Constant(Name::kHashShift));
+}
+
+Node* CodeStubAssembler::LoadStringLength(Node* object) {
+  return LoadObjectField(object, String::kLengthOffset);
+}
+
+Node* CodeStubAssembler::LoadJSValueValue(Node* object) {
+  return LoadObjectField(object, JSValue::kValueOffset);
+}
+
+Node* CodeStubAssembler::LoadWeakCellValue(Node* weak_cell, Label* if_cleared) {
+  Node* value = LoadObjectField(weak_cell, WeakCell::kValueOffset);
+  if (if_cleared != nullptr) {
+    GotoIf(WordEqual(value, IntPtrConstant(0)), if_cleared);
+  }
+  return value;
 }
 
 Node* CodeStubAssembler::AllocateUninitializedFixedArray(Node* length) {
@@ -537,9 +621,14 @@
   return Load(MachineType::AnyTagged(), object, offset);
 }
 
-Node* CodeStubAssembler::LoadMapInstanceSize(Node* map) {
-  return Load(MachineType::Uint8(), map,
-              IntPtrConstant(Map::kInstanceSizeOffset - kHeapObjectTag));
+Node* CodeStubAssembler::LoadFixedDoubleArrayElement(
+    Node* object, Node* index_node, MachineType machine_type,
+    int additional_offset, ParameterMode parameter_mode) {
+  int32_t header_size =
+      FixedDoubleArray::kHeaderSize + additional_offset - kHeapObjectTag;
+  Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_DOUBLE_ELEMENTS,
+                                        parameter_mode, header_size);
+  return Load(machine_type, object, offset);
 }
 
 Node* CodeStubAssembler::LoadNativeContext(Node* context) {
@@ -620,21 +709,107 @@
   StoreMapNoWriteBarrier(result, LoadRoot(Heap::kOneByteStringMapRootIndex));
   StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
                                  SmiConstant(Smi::FromInt(length)));
-  StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot,
-                                 IntPtrConstant(String::kEmptyHashField));
+  StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldOffset,
+                                 IntPtrConstant(String::kEmptyHashField),
+                                 MachineRepresentation::kWord32);
   return result;
 }
 
+Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length) {
+  Variable var_result(this, MachineRepresentation::kTagged);
+
+  // Compute the SeqOneByteString size and check if it fits into new space.
+  Label if_sizeissmall(this), if_notsizeissmall(this, Label::kDeferred),
+      if_join(this);
+  Node* size = WordAnd(
+      IntPtrAdd(
+          IntPtrAdd(length, IntPtrConstant(SeqOneByteString::kHeaderSize)),
+          IntPtrConstant(kObjectAlignmentMask)),
+      IntPtrConstant(~kObjectAlignmentMask));
+  Branch(IntPtrLessThanOrEqual(size,
+                               IntPtrConstant(Page::kMaxRegularHeapObjectSize)),
+         &if_sizeissmall, &if_notsizeissmall);
+
+  Bind(&if_sizeissmall);
+  {
+    // Just allocate the SeqOneByteString in new space.
+    Node* result = Allocate(size);
+    StoreMapNoWriteBarrier(result, LoadRoot(Heap::kOneByteStringMapRootIndex));
+    StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
+                                   SmiFromWord(length));
+    StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldOffset,
+                                   IntPtrConstant(String::kEmptyHashField),
+                                   MachineRepresentation::kWord32);
+    var_result.Bind(result);
+    Goto(&if_join);
+  }
+
+  Bind(&if_notsizeissmall);
+  {
+    // We might need to allocate in large object space, go to the runtime.
+    Node* result = CallRuntime(Runtime::kAllocateSeqOneByteString, context,
+                               SmiFromWord(length));
+    var_result.Bind(result);
+    Goto(&if_join);
+  }
+
+  Bind(&if_join);
+  return var_result.value();
+}
+
 Node* CodeStubAssembler::AllocateSeqTwoByteString(int length) {
   Node* result = Allocate(SeqTwoByteString::SizeFor(length));
   StoreMapNoWriteBarrier(result, LoadRoot(Heap::kStringMapRootIndex));
   StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
                                  SmiConstant(Smi::FromInt(length)));
-  StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot,
-                                 IntPtrConstant(String::kEmptyHashField));
+  StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldOffset,
+                                 IntPtrConstant(String::kEmptyHashField),
+                                 MachineRepresentation::kWord32);
   return result;
 }
 
+Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length) {
+  Variable var_result(this, MachineRepresentation::kTagged);
+
+  // Compute the SeqTwoByteString size and check if it fits into new space.
+  Label if_sizeissmall(this), if_notsizeissmall(this, Label::kDeferred),
+      if_join(this);
+  Node* size = WordAnd(
+      IntPtrAdd(IntPtrAdd(WordShl(length, 1),
+                          IntPtrConstant(SeqTwoByteString::kHeaderSize)),
+                IntPtrConstant(kObjectAlignmentMask)),
+      IntPtrConstant(~kObjectAlignmentMask));
+  Branch(IntPtrLessThanOrEqual(size,
+                               IntPtrConstant(Page::kMaxRegularHeapObjectSize)),
+         &if_sizeissmall, &if_notsizeissmall);
+
+  Bind(&if_sizeissmall);
+  {
+    // Just allocate the SeqTwoByteString in new space.
+    Node* result = Allocate(size);
+    StoreMapNoWriteBarrier(result, LoadRoot(Heap::kStringMapRootIndex));
+    StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
+                                   SmiFromWord(length));
+    StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldOffset,
+                                   IntPtrConstant(String::kEmptyHashField),
+                                   MachineRepresentation::kWord32);
+    var_result.Bind(result);
+    Goto(&if_join);
+  }
+
+  Bind(&if_notsizeissmall);
+  {
+    // We might need to allocate in large object space, go to the runtime.
+    Node* result = CallRuntime(Runtime::kAllocateSeqTwoByteString, context,
+                               SmiFromWord(length));
+    var_result.Bind(result);
+    Goto(&if_join);
+  }
+
+  Bind(&if_join);
+  return var_result.value();
+}
+
 Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
                                          Node* capacity_node, Node* length_node,
                                          compiler::Node* allocation_site,
@@ -643,6 +818,8 @@
   int base_size = JSArray::kSize + FixedArray::kHeaderSize;
   int elements_offset = JSArray::kSize;
 
+  Comment("begin allocation of JSArray");
+
   if (allocation_site != nullptr) {
     base_size += AllocationMemento::kSize;
     elements_offset += AllocationMemento::kSize;
@@ -714,8 +891,49 @@
       }
     }
   } else {
-    // TODO(danno): Add a loop for initialization
-    UNIMPLEMENTED();
+    Variable current(this, MachineRepresentation::kTagged);
+    Label test(this);
+    Label decrement(this, &current);
+    Label done(this);
+    Node* limit = IntPtrAdd(elements, IntPtrConstant(first_element_offset));
+    current.Bind(
+        IntPtrAdd(limit, ElementOffsetFromIndex(capacity_node, kind, mode, 0)));
+
+    Branch(WordEqual(current.value(), limit), &done, &decrement);
+
+    Bind(&decrement);
+    current.Bind(IntPtrSub(
+        current.value(),
+        Int32Constant(IsFastDoubleElementsKind(kind) ? kDoubleSize
+                                                     : kPointerSize)));
+    if (is_double) {
+      // Don't use doubles to store the hole double, since manipulating the
+      // signaling NaN used for the hole in C++, e.g. with bit_cast, will
+      // change its value on ia32 (the x87 stack is used to return values
+      // and stores to the stack silently clear the signalling bit).
+      //
+      // TODO(danno): When we have a Float32/Float64 wrapper class that
+      // preserves double bits during manipulation, remove this code/change
+      // this to an indexed Float64 store.
+      if (Is64()) {
+        StoreNoWriteBarrier(MachineRepresentation::kWord64, current.value(),
+                            double_hole);
+      } else {
+        StoreNoWriteBarrier(MachineRepresentation::kWord32, current.value(),
+                            double_hole);
+        StoreNoWriteBarrier(
+            MachineRepresentation::kWord32,
+            IntPtrAdd(current.value(), Int32Constant(kPointerSize)),
+            double_hole);
+      }
+    } else {
+      StoreNoWriteBarrier(MachineRepresentation::kTagged, current.value(),
+                          hole);
+    }
+    Node* compare = WordNotEqual(current.value(), limit);
+    Branch(compare, &decrement, &done);
+
+    Bind(&done);
   }
 
   return array;
@@ -1256,19 +1474,46 @@
                    Int32Constant(shift));
 }
 
+void CodeStubAssembler::SetCounter(StatsCounter* counter, int value) {
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    Node* counter_address = ExternalConstant(ExternalReference(counter));
+    StoreNoWriteBarrier(MachineRepresentation::kWord32, counter_address,
+                        Int32Constant(value));
+  }
+}
+
+void CodeStubAssembler::IncrementCounter(StatsCounter* counter, int delta) {
+  DCHECK(delta > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    Node* counter_address = ExternalConstant(ExternalReference(counter));
+    Node* value = Load(MachineType::Int32(), counter_address);
+    value = Int32Add(value, Int32Constant(delta));
+    StoreNoWriteBarrier(MachineRepresentation::kWord32, counter_address, value);
+  }
+}
+
+void CodeStubAssembler::DecrementCounter(StatsCounter* counter, int delta) {
+  DCHECK(delta > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    Node* counter_address = ExternalConstant(ExternalReference(counter));
+    Node* value = Load(MachineType::Int32(), counter_address);
+    value = Int32Sub(value, Int32Constant(delta));
+    StoreNoWriteBarrier(MachineRepresentation::kWord32, counter_address, value);
+  }
+}
+
 void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
                                   Variable* var_index, Label* if_keyisunique,
-                                  Label* call_runtime) {
+                                  Label* if_bailout) {
   DCHECK_EQ(MachineRepresentation::kWord32, var_index->rep());
+  Comment("TryToName");
 
   Label if_keyissmi(this), if_keyisnotsmi(this);
   Branch(WordIsSmi(key), &if_keyissmi, &if_keyisnotsmi);
   Bind(&if_keyissmi);
   {
     // Negative smi keys are named properties. Handle in the runtime.
-    Label if_keyispositive(this);
-    Branch(WordIsPositiveSmi(key), &if_keyispositive, call_runtime);
-    Bind(&if_keyispositive);
+    GotoUnless(WordIsPositiveSmi(key), if_bailout);
 
     var_index->Bind(SmiToWord32(key));
     Goto(if_keyisindex);
@@ -1277,125 +1522,659 @@
   Bind(&if_keyisnotsmi);
 
   Node* key_instance_type = LoadInstanceType(key);
-  Label if_keyisnotsymbol(this);
-  Branch(Word32Equal(key_instance_type, Int32Constant(SYMBOL_TYPE)),
-         if_keyisunique, &if_keyisnotsymbol);
-  Bind(&if_keyisnotsymbol);
-  {
-    Label if_keyisinternalized(this);
-    Node* bits =
-        WordAnd(key_instance_type,
-                Int32Constant(kIsNotStringMask | kIsNotInternalizedMask));
-    Branch(Word32Equal(bits, Int32Constant(kStringTag | kInternalizedTag)),
-           &if_keyisinternalized, call_runtime);
-    Bind(&if_keyisinternalized);
+  // Symbols are unique.
+  GotoIf(Word32Equal(key_instance_type, Int32Constant(SYMBOL_TYPE)),
+         if_keyisunique);
 
-    // Check whether the key is an array index passed in as string. Handle
-    // uniform with smi keys if so.
-    // TODO(verwaest): Also support non-internalized strings.
-    Node* hash = LoadNameHash(key);
-    Node* bit =
-        Word32And(hash, Int32Constant(internal::Name::kIsNotArrayIndexMask));
-    Label if_isarrayindex(this);
-    Branch(Word32Equal(bit, Int32Constant(0)), &if_isarrayindex,
-           if_keyisunique);
-    Bind(&if_isarrayindex);
-    var_index->Bind(BitFieldDecode<internal::Name::ArrayIndexValueBits>(hash));
-    Goto(if_keyisindex);
-  }
+  Label if_keyisinternalized(this);
+  Node* bits =
+      WordAnd(key_instance_type,
+              Int32Constant(kIsNotStringMask | kIsNotInternalizedMask));
+  Branch(Word32Equal(bits, Int32Constant(kStringTag | kInternalizedTag)),
+         &if_keyisinternalized, if_bailout);
+  Bind(&if_keyisinternalized);
+
+  // Check whether the key is an array index passed in as string. Handle
+  // uniform with smi keys if so.
+  // TODO(verwaest): Also support non-internalized strings.
+  Node* hash = LoadNameHashField(key);
+  Node* bit = Word32And(hash, Int32Constant(Name::kIsNotArrayIndexMask));
+  GotoIf(Word32NotEqual(bit, Int32Constant(0)), if_keyisunique);
+  // Key is an index. Check if it is small enough to be encoded in the
+  // hash_field. Handle too big array index in runtime.
+  bit = Word32And(hash, Int32Constant(Name::kContainsCachedArrayIndexMask));
+  GotoIf(Word32NotEqual(bit, Int32Constant(0)), if_bailout);
+  var_index->Bind(BitFieldDecode<Name::ArrayIndexValueBits>(hash));
+  Goto(if_keyisindex);
 }
 
-void CodeStubAssembler::TryLookupProperty(Node* object, Node* map,
-                                          Node* instance_type, Node* name,
-                                          Label* if_found, Label* if_not_found,
-                                          Label* call_runtime) {
-  {
-    Label if_objectissimple(this);
-    Branch(Int32LessThanOrEqual(instance_type,
-                                Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
-           call_runtime, &if_objectissimple);
-    Bind(&if_objectissimple);
+template <typename Dictionary>
+Node* CodeStubAssembler::EntryToIndex(Node* entry, int field_index) {
+  Node* entry_index = Int32Mul(entry, Int32Constant(Dictionary::kEntrySize));
+  return Int32Add(entry_index,
+                  Int32Constant(Dictionary::kElementsStartIndex + field_index));
+}
+
+template <typename Dictionary>
+void CodeStubAssembler::NameDictionaryLookup(Node* dictionary,
+                                             Node* unique_name, Label* if_found,
+                                             Variable* var_name_index,
+                                             Label* if_not_found,
+                                             int inlined_probes) {
+  DCHECK_EQ(MachineRepresentation::kWord32, var_name_index->rep());
+  Comment("NameDictionaryLookup");
+
+  Node* capacity = SmiToWord32(LoadFixedArrayElement(
+      dictionary, Int32Constant(Dictionary::kCapacityIndex)));
+  Node* mask = Int32Sub(capacity, Int32Constant(1));
+  Node* hash = LoadNameHash(unique_name);
+
+  // See Dictionary::FirstProbe().
+  Node* count = Int32Constant(0);
+  Node* entry = Word32And(hash, mask);
+
+  for (int i = 0; i < inlined_probes; i++) {
+    Node* index = EntryToIndex<Dictionary>(entry);
+    var_name_index->Bind(index);
+
+    Node* current = LoadFixedArrayElement(dictionary, index);
+    GotoIf(WordEqual(current, unique_name), if_found);
+
+    // See Dictionary::NextProbe().
+    count = Int32Constant(i + 1);
+    entry = Word32And(Int32Add(entry, count), mask);
   }
 
-  // TODO(verwaest): Perform a dictonary lookup on slow-mode receivers.
-  Node* bit_field3 = LoadMapBitField3(map);
-  Node* bit = BitFieldDecode<Map::DictionaryMap>(bit_field3);
-  Label if_isfastmap(this);
-  Branch(Word32Equal(bit, Int32Constant(0)), &if_isfastmap, call_runtime);
-  Bind(&if_isfastmap);
-  Node* nof = BitFieldDecode<Map::NumberOfOwnDescriptorsBits>(bit_field3);
-  // Bail out to the runtime for large numbers of own descriptors. The stub only
-  // does linear search, which becomes too expensive in that case.
-  {
-    static const int32_t kMaxLinear = 210;
-    Label above_max(this), below_max(this);
-    Branch(Int32LessThanOrEqual(nof, Int32Constant(kMaxLinear)), &below_max,
-           call_runtime);
-    Bind(&below_max);
-  }
-  Node* descriptors = LoadMapDescriptors(map);
+  Node* undefined = UndefinedConstant();
 
-  Variable var_descriptor(this, MachineRepresentation::kWord32);
-  Label loop(this, &var_descriptor);
-  var_descriptor.Bind(Int32Constant(0));
+  Variable var_count(this, MachineRepresentation::kWord32);
+  Variable var_entry(this, MachineRepresentation::kWord32);
+  Variable* loop_vars[] = {&var_count, &var_entry, var_name_index};
+  Label loop(this, 3, loop_vars);
+  var_count.Bind(count);
+  var_entry.Bind(entry);
   Goto(&loop);
   Bind(&loop);
   {
-    Node* index = var_descriptor.value();
-    Node* offset = Int32Constant(DescriptorArray::ToKeyIndex(0));
-    Node* factor = Int32Constant(DescriptorArray::kDescriptorSize);
-    Label if_notdone(this);
-    Branch(Word32Equal(index, nof), if_not_found, &if_notdone);
-    Bind(&if_notdone);
+    Node* count = var_count.value();
+    Node* entry = var_entry.value();
+
+    Node* index = EntryToIndex<Dictionary>(entry);
+    var_name_index->Bind(index);
+
+    Node* current = LoadFixedArrayElement(dictionary, index);
+    GotoIf(WordEqual(current, undefined), if_not_found);
+    GotoIf(WordEqual(current, unique_name), if_found);
+
+    // See Dictionary::NextProbe().
+    count = Int32Add(count, Int32Constant(1));
+    entry = Word32And(Int32Add(entry, count), mask);
+
+    var_count.Bind(count);
+    var_entry.Bind(entry);
+    Goto(&loop);
+  }
+}
+
+// Instantiate template methods to workaround GCC compilation issue.
+template void CodeStubAssembler::NameDictionaryLookup<NameDictionary>(
+    Node*, Node*, Label*, Variable*, Label*, int);
+template void CodeStubAssembler::NameDictionaryLookup<GlobalDictionary>(
+    Node*, Node*, Label*, Variable*, Label*, int);
+
+Node* CodeStubAssembler::ComputeIntegerHash(Node* key, Node* seed) {
+  // See v8::internal::ComputeIntegerHash()
+  Node* hash = key;
+  hash = Word32Xor(hash, seed);
+  hash = Int32Add(Word32Xor(hash, Int32Constant(0xffffffff)),
+                  Word32Shl(hash, Int32Constant(15)));
+  hash = Word32Xor(hash, Word32Shr(hash, Int32Constant(12)));
+  hash = Int32Add(hash, Word32Shl(hash, Int32Constant(2)));
+  hash = Word32Xor(hash, Word32Shr(hash, Int32Constant(4)));
+  hash = Int32Mul(hash, Int32Constant(2057));
+  hash = Word32Xor(hash, Word32Shr(hash, Int32Constant(16)));
+  return Word32And(hash, Int32Constant(0x3fffffff));
+}
+
+template <typename Dictionary>
+void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary, Node* key,
+                                               Label* if_found,
+                                               Variable* var_entry,
+                                               Label* if_not_found) {
+  DCHECK_EQ(MachineRepresentation::kWord32, var_entry->rep());
+  Comment("NumberDictionaryLookup");
+
+  Node* capacity = SmiToWord32(LoadFixedArrayElement(
+      dictionary, Int32Constant(Dictionary::kCapacityIndex)));
+  Node* mask = Int32Sub(capacity, Int32Constant(1));
+
+  Node* seed;
+  if (Dictionary::ShapeT::UsesSeed) {
+    seed = HashSeed();
+  } else {
+    seed = Int32Constant(kZeroHashSeed);
+  }
+  Node* hash = ComputeIntegerHash(key, seed);
+  Node* key_as_float64 = ChangeUint32ToFloat64(key);
+
+  // See Dictionary::FirstProbe().
+  Node* count = Int32Constant(0);
+  Node* entry = Word32And(hash, mask);
+
+  Node* undefined = UndefinedConstant();
+  Node* the_hole = TheHoleConstant();
+
+  Variable var_count(this, MachineRepresentation::kWord32);
+  Variable* loop_vars[] = {&var_count, var_entry};
+  Label loop(this, 2, loop_vars);
+  var_count.Bind(count);
+  var_entry->Bind(entry);
+  Goto(&loop);
+  Bind(&loop);
+  {
+    Node* count = var_count.value();
+    Node* entry = var_entry->value();
+
+    Node* index = EntryToIndex<Dictionary>(entry);
+    Node* current = LoadFixedArrayElement(dictionary, index);
+    GotoIf(WordEqual(current, undefined), if_not_found);
+    Label next_probe(this);
     {
-      Node* array_index = Int32Add(offset, Int32Mul(index, factor));
-      Node* current = LoadFixedArrayElement(descriptors, array_index);
-      Label if_unequal(this);
-      Branch(WordEqual(current, name), if_found, &if_unequal);
-      Bind(&if_unequal);
+      Label if_currentissmi(this), if_currentisnotsmi(this);
+      Branch(WordIsSmi(current), &if_currentissmi, &if_currentisnotsmi);
+      Bind(&if_currentissmi);
+      {
+        Node* current_value = SmiToWord32(current);
+        Branch(Word32Equal(current_value, key), if_found, &next_probe);
+      }
+      Bind(&if_currentisnotsmi);
+      {
+        GotoIf(WordEqual(current, the_hole), &next_probe);
+        // Current must be the Number.
+        Node* current_value = LoadHeapNumberValue(current);
+        Branch(Float64Equal(current_value, key_as_float64), if_found,
+               &next_probe);
+      }
+    }
+
+    Bind(&next_probe);
+    // See Dictionary::NextProbe().
+    count = Int32Add(count, Int32Constant(1));
+    entry = Word32And(Int32Add(entry, count), mask);
+
+    var_count.Bind(count);
+    var_entry->Bind(entry);
+    Goto(&loop);
+  }
+}
+
+void CodeStubAssembler::TryLookupProperty(
+    Node* object, Node* map, Node* instance_type, Node* unique_name,
+    Label* if_found_fast, Label* if_found_dict, Label* if_found_global,
+    Variable* var_meta_storage, Variable* var_name_index, Label* if_not_found,
+    Label* if_bailout) {
+  DCHECK_EQ(MachineRepresentation::kTagged, var_meta_storage->rep());
+  DCHECK_EQ(MachineRepresentation::kWord32, var_name_index->rep());
+
+  Label if_objectisspecial(this);
+  STATIC_ASSERT(JS_GLOBAL_OBJECT_TYPE <= LAST_SPECIAL_RECEIVER_TYPE);
+  GotoIf(Int32LessThanOrEqual(instance_type,
+                              Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
+         &if_objectisspecial);
+
+  Node* bit_field = LoadMapBitField(map);
+  Node* mask = Int32Constant(1 << Map::kHasNamedInterceptor |
+                             1 << Map::kIsAccessCheckNeeded);
+  Assert(Word32Equal(Word32And(bit_field, mask), Int32Constant(0)));
+
+  Node* bit_field3 = LoadMapBitField3(map);
+  Node* bit = BitFieldDecode<Map::DictionaryMap>(bit_field3);
+  Label if_isfastmap(this), if_isslowmap(this);
+  Branch(Word32Equal(bit, Int32Constant(0)), &if_isfastmap, &if_isslowmap);
+  Bind(&if_isfastmap);
+  {
+    Comment("DescriptorArrayLookup");
+    Node* nof = BitFieldDecode<Map::NumberOfOwnDescriptorsBits>(bit_field3);
+    // Bail out to the runtime for large numbers of own descriptors. The stub
+    // only does linear search, which becomes too expensive in that case.
+    {
+      static const int32_t kMaxLinear = 210;
+      GotoIf(Int32GreaterThan(nof, Int32Constant(kMaxLinear)), if_bailout);
+    }
+    Node* descriptors = LoadMapDescriptors(map);
+    var_meta_storage->Bind(descriptors);
+
+    Variable var_descriptor(this, MachineRepresentation::kWord32);
+    Label loop(this, &var_descriptor);
+    var_descriptor.Bind(Int32Constant(0));
+    Goto(&loop);
+    Bind(&loop);
+    {
+      Node* index = var_descriptor.value();
+      Node* name_offset = Int32Constant(DescriptorArray::ToKeyIndex(0));
+      Node* factor = Int32Constant(DescriptorArray::kDescriptorSize);
+      GotoIf(Word32Equal(index, nof), if_not_found);
+
+      Node* name_index = Int32Add(name_offset, Int32Mul(index, factor));
+      Node* name = LoadFixedArrayElement(descriptors, name_index);
+
+      var_name_index->Bind(name_index);
+      GotoIf(WordEqual(name, unique_name), if_found_fast);
 
       var_descriptor.Bind(Int32Add(index, Int32Constant(1)));
       Goto(&loop);
     }
   }
+  Bind(&if_isslowmap);
+  {
+    Node* dictionary = LoadProperties(object);
+    var_meta_storage->Bind(dictionary);
+
+    NameDictionaryLookup<NameDictionary>(dictionary, unique_name, if_found_dict,
+                                         var_name_index, if_not_found);
+  }
+  Bind(&if_objectisspecial);
+  {
+    // Handle global object here and other special objects in runtime.
+    GotoUnless(Word32Equal(instance_type, Int32Constant(JS_GLOBAL_OBJECT_TYPE)),
+               if_bailout);
+
+    // Handle interceptors and access checks in runtime.
+    Node* bit_field = LoadMapBitField(map);
+    Node* mask = Int32Constant(1 << Map::kHasNamedInterceptor |
+                               1 << Map::kIsAccessCheckNeeded);
+    GotoIf(Word32NotEqual(Word32And(bit_field, mask), Int32Constant(0)),
+           if_bailout);
+
+    Node* dictionary = LoadProperties(object);
+    var_meta_storage->Bind(dictionary);
+
+    NameDictionaryLookup<GlobalDictionary>(
+        dictionary, unique_name, if_found_global, var_name_index, if_not_found);
+  }
+}
+
+void CodeStubAssembler::TryHasOwnProperty(compiler::Node* object,
+                                          compiler::Node* map,
+                                          compiler::Node* instance_type,
+                                          compiler::Node* unique_name,
+                                          Label* if_found, Label* if_not_found,
+                                          Label* if_bailout) {
+  Comment("TryHasOwnProperty");
+  Variable var_meta_storage(this, MachineRepresentation::kTagged);
+  Variable var_name_index(this, MachineRepresentation::kWord32);
+
+  Label if_found_global(this);
+  TryLookupProperty(object, map, instance_type, unique_name, if_found, if_found,
+                    &if_found_global, &var_meta_storage, &var_name_index,
+                    if_not_found, if_bailout);
+  Bind(&if_found_global);
+  {
+    Variable var_value(this, MachineRepresentation::kTagged);
+    Variable var_details(this, MachineRepresentation::kWord32);
+    // Check if the property cell is not deleted.
+    LoadPropertyFromGlobalDictionary(var_meta_storage.value(),
+                                     var_name_index.value(), &var_value,
+                                     &var_details, if_not_found);
+    Goto(if_found);
+  }
+}
+
+void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
+                                                   Node* descriptors,
+                                                   Node* name_index,
+                                                   Variable* var_details,
+                                                   Variable* var_value) {
+  DCHECK_EQ(MachineRepresentation::kWord32, var_details->rep());
+  DCHECK_EQ(MachineRepresentation::kTagged, var_value->rep());
+  Comment("[ LoadPropertyFromFastObject");
+
+  const int name_to_details_offset =
+      (DescriptorArray::kDescriptorDetails - DescriptorArray::kDescriptorKey) *
+      kPointerSize;
+  const int name_to_value_offset =
+      (DescriptorArray::kDescriptorValue - DescriptorArray::kDescriptorKey) *
+      kPointerSize;
+
+  Node* details = SmiToWord32(
+      LoadFixedArrayElement(descriptors, name_index, name_to_details_offset));
+  var_details->Bind(details);
+
+  Node* location = BitFieldDecode<PropertyDetails::LocationField>(details);
+
+  Label if_in_field(this), if_in_descriptor(this), done(this);
+  Branch(Word32Equal(location, Int32Constant(kField)), &if_in_field,
+         &if_in_descriptor);
+  Bind(&if_in_field);
+  {
+    Node* field_index =
+        BitFieldDecode<PropertyDetails::FieldIndexField>(details);
+    Node* representation =
+        BitFieldDecode<PropertyDetails::RepresentationField>(details);
+
+    Node* inobject_properties = LoadMapInobjectProperties(map);
+
+    Label if_inobject(this), if_backing_store(this);
+    Variable var_double_value(this, MachineRepresentation::kFloat64);
+    Label rebox_double(this, &var_double_value);
+    BranchIfInt32LessThan(field_index, inobject_properties, &if_inobject,
+                          &if_backing_store);
+    Bind(&if_inobject);
+    {
+      Comment("if_inobject");
+      Node* field_offset = ChangeInt32ToIntPtr(
+          Int32Mul(Int32Sub(LoadMapInstanceSize(map),
+                            Int32Sub(inobject_properties, field_index)),
+                   Int32Constant(kPointerSize)));
+
+      Label if_double(this), if_tagged(this);
+      BranchIfWord32NotEqual(representation,
+                             Int32Constant(Representation::kDouble), &if_tagged,
+                             &if_double);
+      Bind(&if_tagged);
+      {
+        var_value->Bind(LoadObjectField(object, field_offset));
+        Goto(&done);
+      }
+      Bind(&if_double);
+      {
+        if (FLAG_unbox_double_fields) {
+          var_double_value.Bind(
+              LoadObjectField(object, field_offset, MachineType::Float64()));
+        } else {
+          Node* mutable_heap_number = LoadObjectField(object, field_offset);
+          var_double_value.Bind(LoadHeapNumberValue(mutable_heap_number));
+        }
+        Goto(&rebox_double);
+      }
+    }
+    Bind(&if_backing_store);
+    {
+      Comment("if_backing_store");
+      Node* properties = LoadProperties(object);
+      field_index = Int32Sub(field_index, inobject_properties);
+      Node* value = LoadFixedArrayElement(properties, field_index);
+
+      Label if_double(this), if_tagged(this);
+      BranchIfWord32NotEqual(representation,
+                             Int32Constant(Representation::kDouble), &if_tagged,
+                             &if_double);
+      Bind(&if_tagged);
+      {
+        var_value->Bind(value);
+        Goto(&done);
+      }
+      Bind(&if_double);
+      {
+        var_double_value.Bind(LoadHeapNumberValue(value));
+        Goto(&rebox_double);
+      }
+    }
+    Bind(&rebox_double);
+    {
+      Comment("rebox_double");
+      Node* heap_number = AllocateHeapNumber();
+      StoreHeapNumberValue(heap_number, var_double_value.value());
+      var_value->Bind(heap_number);
+      Goto(&done);
+    }
+  }
+  Bind(&if_in_descriptor);
+  {
+    Node* value =
+        LoadFixedArrayElement(descriptors, name_index, name_to_value_offset);
+    var_value->Bind(value);
+    Goto(&done);
+  }
+  Bind(&done);
+
+  Comment("] LoadPropertyFromFastObject");
+}
+
+void CodeStubAssembler::LoadPropertyFromNameDictionary(Node* dictionary,
+                                                       Node* name_index,
+                                                       Variable* var_details,
+                                                       Variable* var_value) {
+  Comment("LoadPropertyFromNameDictionary");
+
+  const int name_to_details_offset =
+      (NameDictionary::kEntryDetailsIndex - NameDictionary::kEntryKeyIndex) *
+      kPointerSize;
+  const int name_to_value_offset =
+      (NameDictionary::kEntryValueIndex - NameDictionary::kEntryKeyIndex) *
+      kPointerSize;
+
+  Node* details = SmiToWord32(
+      LoadFixedArrayElement(dictionary, name_index, name_to_details_offset));
+
+  var_details->Bind(details);
+  var_value->Bind(
+      LoadFixedArrayElement(dictionary, name_index, name_to_value_offset));
+
+  Comment("] LoadPropertyFromNameDictionary");
+}
+
+void CodeStubAssembler::LoadPropertyFromGlobalDictionary(Node* dictionary,
+                                                         Node* name_index,
+                                                         Variable* var_details,
+                                                         Variable* var_value,
+                                                         Label* if_deleted) {
+  Comment("[ LoadPropertyFromGlobalDictionary");
+
+  const int name_to_value_offset =
+      (GlobalDictionary::kEntryValueIndex - GlobalDictionary::kEntryKeyIndex) *
+      kPointerSize;
+
+  Node* property_cell =
+      LoadFixedArrayElement(dictionary, name_index, name_to_value_offset);
+
+  Node* value = LoadObjectField(property_cell, PropertyCell::kValueOffset);
+  GotoIf(WordEqual(value, TheHoleConstant()), if_deleted);
+
+  var_value->Bind(value);
+
+  Node* details =
+      SmiToWord32(LoadObjectField(property_cell, PropertyCell::kDetailsOffset));
+  var_details->Bind(details);
+
+  Comment("] LoadPropertyFromGlobalDictionary");
+}
+
+void CodeStubAssembler::TryGetOwnProperty(
+    Node* context, Node* receiver, Node* object, Node* map, Node* instance_type,
+    Node* unique_name, Label* if_found_value, Variable* var_value,
+    Label* if_not_found, Label* if_bailout) {
+  DCHECK_EQ(MachineRepresentation::kTagged, var_value->rep());
+  Comment("TryGetOwnProperty");
+
+  Variable var_meta_storage(this, MachineRepresentation::kTagged);
+  Variable var_entry(this, MachineRepresentation::kWord32);
+
+  Label if_found_fast(this), if_found_dict(this), if_found_global(this);
+
+  Variable var_details(this, MachineRepresentation::kWord32);
+  Variable* vars[] = {var_value, &var_details};
+  Label if_found(this, 2, vars);
+
+  TryLookupProperty(object, map, instance_type, unique_name, &if_found_fast,
+                    &if_found_dict, &if_found_global, &var_meta_storage,
+                    &var_entry, if_not_found, if_bailout);
+  Bind(&if_found_fast);
+  {
+    Node* descriptors = var_meta_storage.value();
+    Node* name_index = var_entry.value();
+
+    LoadPropertyFromFastObject(object, map, descriptors, name_index,
+                               &var_details, var_value);
+    Goto(&if_found);
+  }
+  Bind(&if_found_dict);
+  {
+    Node* dictionary = var_meta_storage.value();
+    Node* entry = var_entry.value();
+    LoadPropertyFromNameDictionary(dictionary, entry, &var_details, var_value);
+    Goto(&if_found);
+  }
+  Bind(&if_found_global);
+  {
+    Node* dictionary = var_meta_storage.value();
+    Node* entry = var_entry.value();
+
+    LoadPropertyFromGlobalDictionary(dictionary, entry, &var_details, var_value,
+                                     if_not_found);
+    Goto(&if_found);
+  }
+  // Here we have details and value which could be an accessor.
+  Bind(&if_found);
+  {
+    Node* details = var_details.value();
+    Node* kind = BitFieldDecode<PropertyDetails::KindField>(details);
+
+    Label if_accessor(this);
+    Branch(Word32Equal(kind, Int32Constant(kData)), if_found_value,
+           &if_accessor);
+    Bind(&if_accessor);
+    {
+      Node* accessor_pair = var_value->value();
+      GotoIf(Word32Equal(LoadInstanceType(accessor_pair),
+                         Int32Constant(ACCESSOR_INFO_TYPE)),
+             if_bailout);
+      AssertInstanceType(accessor_pair, ACCESSOR_PAIR_TYPE);
+      Node* getter =
+          LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
+      Node* getter_map = LoadMap(getter);
+      Node* instance_type = LoadMapInstanceType(getter_map);
+      // FunctionTemplateInfo getters are not supported yet.
+      GotoIf(Word32Equal(instance_type,
+                         Int32Constant(FUNCTION_TEMPLATE_INFO_TYPE)),
+             if_bailout);
+
+      // Return undefined if the {getter} is not callable.
+      var_value->Bind(UndefinedConstant());
+      GotoIf(Word32Equal(Word32And(LoadMapBitField(getter_map),
+                                   Int32Constant(1 << Map::kIsCallable)),
+                         Int32Constant(0)),
+             if_found_value);
+
+      // Call the accessor.
+      Callable callable = CodeFactory::Call(isolate());
+      Node* result = CallJS(callable, context, getter, receiver);
+      var_value->Bind(result);
+      Goto(if_found_value);
+    }
+  }
 }
 
 void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
                                          Node* instance_type, Node* index,
                                          Label* if_found, Label* if_not_found,
-                                         Label* call_runtime) {
-  {
-    Label if_objectissimple(this);
-    Branch(Int32LessThanOrEqual(instance_type,
-                                Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
-           call_runtime, &if_objectissimple);
-    Bind(&if_objectissimple);
-  }
+                                         Label* if_bailout) {
+  // Handle special objects in runtime.
+  GotoIf(Int32LessThanOrEqual(instance_type,
+                              Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
+         if_bailout);
 
   Node* bit_field2 = LoadMapBitField2(map);
   Node* elements_kind = BitFieldDecode<Map::ElementsKindBits>(bit_field2);
 
   // TODO(verwaest): Support other elements kinds as well.
-  Label if_isobjectorsmi(this);
-  Branch(
-      Int32LessThanOrEqual(elements_kind, Int32Constant(FAST_HOLEY_ELEMENTS)),
-      &if_isobjectorsmi, call_runtime);
+  Label if_isobjectorsmi(this), if_isdouble(this), if_isdictionary(this),
+      if_isfaststringwrapper(this), if_isslowstringwrapper(this);
+  // clang-format off
+  int32_t values[] = {
+      // Handled by {if_isobjectorsmi}.
+      FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS, FAST_ELEMENTS,
+          FAST_HOLEY_ELEMENTS,
+      // Handled by {if_isdouble}.
+      FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS,
+      // Handled by {if_isdictionary}.
+      DICTIONARY_ELEMENTS,
+      // Handled by {if_isfaststringwrapper}.
+      FAST_STRING_WRAPPER_ELEMENTS,
+      // Handled by {if_isslowstringwrapper}.
+      SLOW_STRING_WRAPPER_ELEMENTS,
+      // Handled by {if_not_found}.
+      NO_ELEMENTS,
+  };
+  Label* labels[] = {
+      &if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi,
+          &if_isobjectorsmi,
+      &if_isdouble, &if_isdouble,
+      &if_isdictionary,
+      &if_isfaststringwrapper,
+      &if_isslowstringwrapper,
+      if_not_found,
+  };
+  // clang-format on
+  STATIC_ASSERT(arraysize(values) == arraysize(labels));
+  Switch(elements_kind, if_bailout, values, labels, arraysize(values));
+
   Bind(&if_isobjectorsmi);
   {
     Node* elements = LoadElements(object);
     Node* length = LoadFixedArrayBaseLength(elements);
 
-    Label if_iskeyinrange(this);
-    Branch(Int32LessThan(index, SmiToWord32(length)), &if_iskeyinrange,
-           if_not_found);
+    GotoIf(Int32GreaterThanOrEqual(index, SmiToWord32(length)), if_not_found);
 
-    Bind(&if_iskeyinrange);
     Node* element = LoadFixedArrayElement(elements, index);
-    Node* the_hole = LoadRoot(Heap::kTheHoleValueRootIndex);
+    Node* the_hole = TheHoleConstant();
     Branch(WordEqual(element, the_hole), if_not_found, if_found);
   }
+  Bind(&if_isdouble);
+  {
+    Node* elements = LoadElements(object);
+    Node* length = LoadFixedArrayBaseLength(elements);
+
+    GotoIf(Int32GreaterThanOrEqual(index, SmiToWord32(length)), if_not_found);
+
+    if (kPointerSize == kDoubleSize) {
+      Node* element =
+          LoadFixedDoubleArrayElement(elements, index, MachineType::Uint64());
+      Node* the_hole = Int64Constant(kHoleNanInt64);
+      Branch(Word64Equal(element, the_hole), if_not_found, if_found);
+    } else {
+      Node* element_upper =
+          LoadFixedDoubleArrayElement(elements, index, MachineType::Uint32(),
+                                      kIeeeDoubleExponentWordOffset);
+      Branch(Word32Equal(element_upper, Int32Constant(kHoleNanUpper32)),
+             if_not_found, if_found);
+    }
+  }
+  Bind(&if_isdictionary);
+  {
+    Variable var_entry(this, MachineRepresentation::kWord32);
+    Node* elements = LoadElements(object);
+    NumberDictionaryLookup<SeededNumberDictionary>(elements, index, if_found,
+                                                   &var_entry, if_not_found);
+  }
+  Bind(&if_isfaststringwrapper);
+  {
+    AssertInstanceType(object, JS_VALUE_TYPE);
+    Node* string = LoadJSValueValue(object);
+    Assert(Int32LessThan(LoadInstanceType(string),
+                         Int32Constant(FIRST_NONSTRING_TYPE)));
+    Node* length = LoadStringLength(string);
+    GotoIf(Int32LessThan(index, SmiToWord32(length)), if_found);
+    Goto(&if_isobjectorsmi);
+  }
+  Bind(&if_isslowstringwrapper);
+  {
+    AssertInstanceType(object, JS_VALUE_TYPE);
+    Node* string = LoadJSValueValue(object);
+    Assert(Int32LessThan(LoadInstanceType(string),
+                         Int32Constant(FIRST_NONSTRING_TYPE)));
+    Node* length = LoadStringLength(string);
+    GotoIf(Int32LessThan(index, SmiToWord32(length)), if_found);
+    Goto(&if_isdictionary);
+  }
 }
 
+// Instantiate template methods to workaround GCC compilation issue.
+template void CodeStubAssembler::NumberDictionaryLookup<SeededNumberDictionary>(
+    Node*, Node*, Label*, Variable*, Label*);
+template void CodeStubAssembler::NumberDictionaryLookup<
+    UnseededNumberDictionary>(Node*, Node*, Label*, Variable*, Label*);
+
 Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
                                              Node* object) {
   Variable var_result(this, MachineRepresentation::kTagged);
@@ -1500,8 +2279,8 @@
 
     // Check the current {object} prototype.
     Node* object_prototype = LoadMapPrototype(object_map);
-    GotoIf(WordEqual(object_prototype, callable_prototype), &return_true);
     GotoIf(WordEqual(object_prototype, NullConstant()), &return_false);
+    GotoIf(WordEqual(object_prototype, callable_prototype), &return_true);
 
     // Continue with the prototype.
     var_object_map.Bind(LoadMap(object_prototype));
@@ -1568,5 +2347,331 @@
           : WordShr(index_node, IntPtrConstant(-element_size_shift)));
 }
 
+compiler::Node* CodeStubAssembler::LoadTypeFeedbackVectorForStub() {
+  Node* function =
+      LoadFromParentFrame(JavaScriptFrameConstants::kFunctionOffset);
+  Node* literals = LoadObjectField(function, JSFunction::kLiteralsOffset);
+  return LoadObjectField(literals, LiteralsArray::kFeedbackVectorOffset);
+}
+
+compiler::Node* CodeStubAssembler::LoadReceiverMap(compiler::Node* receiver) {
+  Variable var_receiver_map(this, MachineRepresentation::kTagged);
+  // TODO(ishell): defer blocks when it works.
+  Label load_smi_map(this /*, Label::kDeferred*/), load_receiver_map(this),
+      if_result(this);
+
+  Branch(WordIsSmi(receiver), &load_smi_map, &load_receiver_map);
+  Bind(&load_smi_map);
+  {
+    var_receiver_map.Bind(LoadRoot(Heap::kHeapNumberMapRootIndex));
+    Goto(&if_result);
+  }
+  Bind(&load_receiver_map);
+  {
+    var_receiver_map.Bind(LoadMap(receiver));
+    Goto(&if_result);
+  }
+  Bind(&if_result);
+  return var_receiver_map.value();
+}
+
+compiler::Node* CodeStubAssembler::TryMonomorphicCase(
+    const LoadICParameters* p, compiler::Node* receiver_map, Label* if_handler,
+    Variable* var_handler, Label* if_miss) {
+  DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
+
+  // TODO(ishell): add helper class that hides offset computations for a series
+  // of loads.
+  int32_t header_size = FixedArray::kHeaderSize - kHeapObjectTag;
+  Node* offset = ElementOffsetFromIndex(p->slot, FAST_HOLEY_ELEMENTS,
+                                        SMI_PARAMETERS, header_size);
+  Node* feedback = Load(MachineType::AnyTagged(), p->vector, offset);
+
+  // Try to quickly handle the monomorphic case without knowing for sure
+  // if we have a weak cell in feedback. We do know it's safe to look
+  // at WeakCell::kValueOffset.
+  GotoUnless(WordEqual(receiver_map, LoadWeakCellValue(feedback)), if_miss);
+
+  Node* handler = Load(MachineType::AnyTagged(), p->vector,
+                       IntPtrAdd(offset, IntPtrConstant(kPointerSize)));
+
+  var_handler->Bind(handler);
+  Goto(if_handler);
+  return feedback;
+}
+
+void CodeStubAssembler::HandlePolymorphicCase(
+    const LoadICParameters* p, compiler::Node* receiver_map,
+    compiler::Node* feedback, Label* if_handler, Variable* var_handler,
+    Label* if_miss, int unroll_count) {
+  DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
+
+  // Iterate {feedback} array.
+  const int kEntrySize = 2;
+
+  for (int i = 0; i < unroll_count; i++) {
+    Label next_entry(this);
+    Node* cached_map = LoadWeakCellValue(
+        LoadFixedArrayElement(feedback, Int32Constant(i * kEntrySize)));
+    GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
+
+    // Found, now call handler.
+    Node* handler =
+        LoadFixedArrayElement(feedback, Int32Constant(i * kEntrySize + 1));
+    var_handler->Bind(handler);
+    Goto(if_handler);
+
+    Bind(&next_entry);
+  }
+  Node* length = SmiToWord32(LoadFixedArrayBaseLength(feedback));
+
+  // Loop from {unroll_count}*kEntrySize to {length}.
+  Variable var_index(this, MachineRepresentation::kWord32);
+  Label loop(this, &var_index);
+  var_index.Bind(Int32Constant(unroll_count * kEntrySize));
+  Goto(&loop);
+  Bind(&loop);
+  {
+    Node* index = var_index.value();
+    GotoIf(Int32GreaterThanOrEqual(index, length), if_miss);
+
+    Node* cached_map =
+        LoadWeakCellValue(LoadFixedArrayElement(feedback, index));
+
+    Label next_entry(this);
+    GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
+
+    // Found, now call handler.
+    Node* handler = LoadFixedArrayElement(feedback, index, kPointerSize);
+    var_handler->Bind(handler);
+    Goto(if_handler);
+
+    Bind(&next_entry);
+    var_index.Bind(Int32Add(index, Int32Constant(kEntrySize)));
+    Goto(&loop);
+  }
+}
+
+compiler::Node* CodeStubAssembler::StubCachePrimaryOffset(compiler::Node* name,
+                                                          Code::Flags flags,
+                                                          compiler::Node* map) {
+  // See v8::internal::StubCache::PrimaryOffset().
+  STATIC_ASSERT(StubCache::kCacheIndexShift == Name::kHashShift);
+  // Compute the hash of the name (use entire hash field).
+  Node* hash_field = LoadNameHashField(name);
+  Assert(WordEqual(
+      Word32And(hash_field, Int32Constant(Name::kHashNotComputedMask)),
+      Int32Constant(0)));
+
+  // Using only the low bits in 64-bit mode is unlikely to increase the
+  // risk of collision even if the heap is spread over an area larger than
+  // 4Gb (and not at all if it isn't).
+  Node* hash = Int32Add(hash_field, map);
+  // We always set the in_loop bit to zero when generating the lookup code
+  // so do it here too so the hash codes match.
+  uint32_t iflags =
+      (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
+  // Base the offset on a simple combination of name, flags, and map.
+  hash = Word32Xor(hash, Int32Constant(iflags));
+  uint32_t mask = (StubCache::kPrimaryTableSize - 1)
+                  << StubCache::kCacheIndexShift;
+  return Word32And(hash, Int32Constant(mask));
+}
+
+compiler::Node* CodeStubAssembler::StubCacheSecondaryOffset(
+    compiler::Node* name, Code::Flags flags, compiler::Node* seed) {
+  // See v8::internal::StubCache::SecondaryOffset().
+
+  // Use the seed from the primary cache in the secondary cache.
+  Node* hash = Int32Sub(seed, name);
+  // We always set the in_loop bit to zero when generating the lookup code
+  // so do it here too so the hash codes match.
+  uint32_t iflags =
+      (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
+  hash = Int32Add(hash, Int32Constant(iflags));
+  int32_t mask = (StubCache::kSecondaryTableSize - 1)
+                 << StubCache::kCacheIndexShift;
+  return Word32And(hash, Int32Constant(mask));
+}
+
+enum CodeStubAssembler::StubCacheTable : int {
+  kPrimary = static_cast<int>(StubCache::kPrimary),
+  kSecondary = static_cast<int>(StubCache::kSecondary)
+};
+
+void CodeStubAssembler::TryProbeStubCacheTable(
+    StubCache* stub_cache, StubCacheTable table_id,
+    compiler::Node* entry_offset, compiler::Node* name, Code::Flags flags,
+    compiler::Node* map, Label* if_handler, Variable* var_handler,
+    Label* if_miss) {
+  StubCache::Table table = static_cast<StubCache::Table>(table_id);
+#ifdef DEBUG
+  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+    Goto(if_miss);
+    return;
+  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+    Goto(if_miss);
+    return;
+  }
+#endif
+  // The {table_offset} holds the entry offset times four (due to masking
+  // and shifting optimizations).
+  const int kMultiplier = sizeof(StubCache::Entry) >> Name::kHashShift;
+  entry_offset = Int32Mul(entry_offset, Int32Constant(kMultiplier));
+
+  // Check that the key in the entry matches the name.
+  Node* key_base =
+      ExternalConstant(ExternalReference(stub_cache->key_reference(table)));
+  Node* entry_key = Load(MachineType::Pointer(), key_base, entry_offset);
+  GotoIf(WordNotEqual(name, entry_key), if_miss);
+
+  // Get the map entry from the cache.
+  DCHECK_EQ(kPointerSize * 2, stub_cache->map_reference(table).address() -
+                                  stub_cache->key_reference(table).address());
+  Node* entry_map =
+      Load(MachineType::Pointer(), key_base,
+           Int32Add(entry_offset, Int32Constant(kPointerSize * 2)));
+  GotoIf(WordNotEqual(map, entry_map), if_miss);
+
+  // Check that the flags match what we're looking for.
+  DCHECK_EQ(kPointerSize, stub_cache->value_reference(table).address() -
+                              stub_cache->key_reference(table).address());
+  Node* code = Load(MachineType::Pointer(), key_base,
+                    Int32Add(entry_offset, Int32Constant(kPointerSize)));
+
+  Node* code_flags =
+      LoadObjectField(code, Code::kFlagsOffset, MachineType::Uint32());
+  GotoIf(Word32NotEqual(Int32Constant(flags),
+                        Word32And(code_flags,
+                                  Int32Constant(~Code::kFlagsNotUsedInLookup))),
+         if_miss);
+
+  // We found the handler.
+  var_handler->Bind(code);
+  Goto(if_handler);
+}
+
+void CodeStubAssembler::TryProbeStubCache(
+    StubCache* stub_cache, Code::Flags flags, compiler::Node* receiver,
+    compiler::Node* name, Label* if_handler, Variable* var_handler,
+    Label* if_miss) {
+  Label try_secondary(this), miss(this);
+
+  Counters* counters = isolate()->counters();
+  IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
+
+  // Check that the {receiver} isn't a smi.
+  GotoIf(WordIsSmi(receiver), &miss);
+
+  Node* receiver_map = LoadMap(receiver);
+
+  // Probe the primary table.
+  Node* primary_offset = StubCachePrimaryOffset(name, flags, receiver_map);
+  TryProbeStubCacheTable(stub_cache, kPrimary, primary_offset, name, flags,
+                         receiver_map, if_handler, var_handler, &try_secondary);
+
+  Bind(&try_secondary);
+  {
+    // Probe the secondary table.
+    Node* secondary_offset =
+        StubCacheSecondaryOffset(name, flags, primary_offset);
+    TryProbeStubCacheTable(stub_cache, kSecondary, secondary_offset, name,
+                           flags, receiver_map, if_handler, var_handler, &miss);
+  }
+
+  Bind(&miss);
+  {
+    IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
+    Goto(if_miss);
+  }
+}
+
+void CodeStubAssembler::LoadIC(const LoadICParameters* p) {
+  Variable var_handler(this, MachineRepresentation::kTagged);
+  // TODO(ishell): defer blocks when it works.
+  Label if_handler(this, &var_handler), try_polymorphic(this),
+      try_megamorphic(this /*, Label::kDeferred*/),
+      miss(this /*, Label::kDeferred*/);
+
+  Node* receiver_map = LoadReceiverMap(p->receiver);
+
+  // Check monomorphic case.
+  Node* feedback = TryMonomorphicCase(p, receiver_map, &if_handler,
+                                      &var_handler, &try_polymorphic);
+  Bind(&if_handler);
+  {
+    LoadWithVectorDescriptor descriptor(isolate());
+    TailCallStub(descriptor, var_handler.value(), p->context, p->receiver,
+                 p->name, p->slot, p->vector);
+  }
+
+  Bind(&try_polymorphic);
+  {
+    // Check polymorphic case.
+    GotoUnless(
+        WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
+        &try_megamorphic);
+    HandlePolymorphicCase(p, receiver_map, feedback, &if_handler, &var_handler,
+                          &miss, 2);
+  }
+
+  Bind(&try_megamorphic);
+  {
+    // Check megamorphic case.
+    GotoUnless(
+        WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+        &miss);
+
+    Code::Flags code_flags =
+        Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
+
+    TryProbeStubCache(isolate()->stub_cache(), code_flags, p->receiver, p->name,
+                      &if_handler, &var_handler, &miss);
+  }
+  Bind(&miss);
+  {
+    TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
+                    p->slot, p->vector);
+  }
+}
+
+void CodeStubAssembler::LoadGlobalIC(const LoadICParameters* p) {
+  Label try_handler(this), miss(this);
+  Node* weak_cell =
+      LoadFixedArrayElement(p->vector, p->slot, 0, SMI_PARAMETERS);
+  AssertInstanceType(weak_cell, WEAK_CELL_TYPE);
+
+  // Load value or try handler case if the {weak_cell} is cleared.
+  Node* property_cell = LoadWeakCellValue(weak_cell, &try_handler);
+  AssertInstanceType(property_cell, PROPERTY_CELL_TYPE);
+
+  Node* value = LoadObjectField(property_cell, PropertyCell::kValueOffset);
+  GotoIf(WordEqual(value, TheHoleConstant()), &miss);
+  Return(value);
+
+  Bind(&try_handler);
+  {
+    Node* handler =
+        LoadFixedArrayElement(p->vector, p->slot, kPointerSize, SMI_PARAMETERS);
+    GotoIf(WordEqual(handler, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
+           &miss);
+
+    // In this case {handler} must be a Code object.
+    AssertInstanceType(handler, CODE_TYPE);
+    LoadWithVectorDescriptor descriptor(isolate());
+    Node* native_context = LoadNativeContext(p->context);
+    Node* receiver = LoadFixedArrayElement(
+        native_context, Int32Constant(Context::EXTENSION_INDEX));
+    Node* fake_name = IntPtrConstant(0);
+    TailCallStub(descriptor, handler, p->context, receiver, fake_name, p->slot,
+                 p->vector);
+  }
+  Bind(&miss);
+  {
+    TailCallRuntime(Runtime::kLoadGlobalIC_Miss, p->context, p->slot,
+                    p->vector);
+  }
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/code-stub-assembler.h b/src/code-stub-assembler.h
index 891fd24..f10e3ad 100644
--- a/src/code-stub-assembler.h
+++ b/src/code-stub-assembler.h
@@ -12,6 +12,8 @@
 namespace internal {
 
 class CallInterfaceDescriptor;
+class StatsCounter;
+class StubCache;
 
 // Provides JavaScript-specific "macro-assembler" functionality on top of the
 // CodeAssembler. By factoring the JavaScript-isms out of the CodeAssembler,
@@ -40,6 +42,8 @@
   compiler::Node* NoContextConstant();
   compiler::Node* NullConstant();
   compiler::Node* UndefinedConstant();
+  compiler::Node* TheHoleConstant();
+  compiler::Node* HashSeed();
   compiler::Node* StaleRegisterConstant();
 
   // Float64 operations.
@@ -55,6 +59,7 @@
 
   // Smi conversions.
   compiler::Node* SmiToFloat64(compiler::Node* value);
+  compiler::Node* SmiFromWord(compiler::Node* value) { return SmiTag(value); }
   compiler::Node* SmiFromWord32(compiler::Node* value);
   compiler::Node* SmiToWord(compiler::Node* value) { return SmiUntag(value); }
   compiler::Node* SmiToWord32(compiler::Node* value);
@@ -77,6 +82,8 @@
   compiler::Node* InnerAllocate(compiler::Node* previous,
                                 compiler::Node* offset);
 
+  void Assert(compiler::Node* condition);
+
   // Check a value for smi-ness
   compiler::Node* WordIsSmi(compiler::Node* a);
   // Check that the value is a positive smi.
@@ -97,18 +104,33 @@
     BranchIfFloat64Equal(value, value, if_false, if_true);
   }
 
+  // Load value from current frame by given offset in bytes.
+  compiler::Node* LoadFromFrame(int offset,
+                                MachineType rep = MachineType::AnyTagged());
+  // Load value from current parent frame by given offset in bytes.
+  compiler::Node* LoadFromParentFrame(
+      int offset, MachineType rep = MachineType::AnyTagged());
+
   // Load an object pointer from a buffer that isn't in the heap.
   compiler::Node* LoadBufferObject(compiler::Node* buffer, int offset,
                                    MachineType rep = MachineType::AnyTagged());
   // Load a field from an object on the heap.
   compiler::Node* LoadObjectField(compiler::Node* object, int offset,
                                   MachineType rep = MachineType::AnyTagged());
+  compiler::Node* LoadObjectField(compiler::Node* object,
+                                  compiler::Node* offset,
+                                  MachineType rep = MachineType::AnyTagged());
+
   // Load the floating point value of a HeapNumber.
   compiler::Node* LoadHeapNumberValue(compiler::Node* object);
   // Load the Map of an HeapObject.
   compiler::Node* LoadMap(compiler::Node* object);
   // Load the instance type of an HeapObject.
   compiler::Node* LoadInstanceType(compiler::Node* object);
+  // Checks that given heap object has given instance type.
+  void AssertInstanceType(compiler::Node* object, InstanceType instance_type);
+  // Load the properties backing store of a JSObject.
+  compiler::Node* LoadProperties(compiler::Node* object);
   // Load the elements backing store of a JSObject.
   compiler::Node* LoadElements(compiler::Node* object);
   // Load the length of a fixed array base instance.
@@ -125,11 +147,25 @@
   compiler::Node* LoadMapDescriptors(compiler::Node* map);
   // Load the prototype of a map.
   compiler::Node* LoadMapPrototype(compiler::Node* map);
-
-  // Load the hash field of a name.
-  compiler::Node* LoadNameHash(compiler::Node* name);
   // Load the instance size of a Map.
   compiler::Node* LoadMapInstanceSize(compiler::Node* map);
+  // Load the inobject properties count of a Map (valid only for JSObjects).
+  compiler::Node* LoadMapInobjectProperties(compiler::Node* map);
+
+  // Load the hash field of a name.
+  compiler::Node* LoadNameHashField(compiler::Node* name);
+  // Load the hash value of a name. If {if_hash_not_computed} label
+  // is specified then it also checks if hash is actually computed.
+  compiler::Node* LoadNameHash(compiler::Node* name,
+                               Label* if_hash_not_computed = nullptr);
+
+  // Load length field of a String object.
+  compiler::Node* LoadStringLength(compiler::Node* object);
+  // Load value field of a JSValue object.
+  compiler::Node* LoadJSValueValue(compiler::Node* object);
+  // Load value field of a WeakCell object.
+  compiler::Node* LoadWeakCellValue(compiler::Node* weak_cell,
+                                    Label* if_cleared = nullptr);
 
   compiler::Node* AllocateUninitializedFixedArray(compiler::Node* length);
 
@@ -138,6 +174,11 @@
       compiler::Node* object, compiler::Node* int32_index,
       int additional_offset = 0,
       ParameterMode parameter_mode = INTEGER_PARAMETERS);
+  // Load an array element from a FixedDoubleArray.
+  compiler::Node* LoadFixedDoubleArrayElement(
+      compiler::Node* object, compiler::Node* int32_index,
+      MachineType machine_type, int additional_offset = 0,
+      ParameterMode parameter_mode = INTEGER_PARAMETERS);
 
   // Context manipulation
   compiler::Node* LoadNativeContext(compiler::Node* context);
@@ -173,8 +214,12 @@
   compiler::Node* AllocateHeapNumberWithValue(compiler::Node* value);
   // Allocate a SeqOneByteString with the given length.
   compiler::Node* AllocateSeqOneByteString(int length);
+  compiler::Node* AllocateSeqOneByteString(compiler::Node* context,
+                                           compiler::Node* length);
   // Allocate a SeqTwoByteString with the given length.
   compiler::Node* AllocateSeqTwoByteString(int length);
+  compiler::Node* AllocateSeqTwoByteString(compiler::Node* context,
+                                           compiler::Node* length);
   // Allocated an JSArray
   compiler::Node* AllocateJSArray(ElementsKind kind, compiler::Node* array_map,
                                   compiler::Node* capacity,
@@ -221,19 +266,95 @@
   compiler::Node* BitFieldDecode(compiler::Node* word32, uint32_t shift,
                                  uint32_t mask);
 
+  void SetCounter(StatsCounter* counter, int value);
+  void IncrementCounter(StatsCounter* counter, int delta);
+  void DecrementCounter(StatsCounter* counter, int delta);
+
   // Various building blocks for stubs doing property lookups.
   void TryToName(compiler::Node* key, Label* if_keyisindex, Variable* var_index,
-                 Label* if_keyisunique, Label* call_runtime);
+                 Label* if_keyisunique, Label* if_bailout);
 
+  // Calculates array index for given dictionary entry and entry field.
+  // See Dictionary::EntryToIndex().
+  template <typename Dictionary>
+  compiler::Node* EntryToIndex(compiler::Node* entry, int field_index);
+  template <typename Dictionary>
+  compiler::Node* EntryToIndex(compiler::Node* entry) {
+    return EntryToIndex<Dictionary>(entry, Dictionary::kEntryKeyIndex);
+  }
+
+  // Looks up an entry in a NameDictionaryBase successor. If the entry is found
+  // control goes to {if_found} and {var_name_index} contains an index of the
+  // key field of the entry found. If the key is not found control goes to
+  // {if_not_found}.
+  static const int kInlinedDictionaryProbes = 4;
+  template <typename Dictionary>
+  void NameDictionaryLookup(compiler::Node* dictionary,
+                            compiler::Node* unique_name, Label* if_found,
+                            Variable* var_name_index, Label* if_not_found,
+                            int inlined_probes = kInlinedDictionaryProbes);
+
+  compiler::Node* ComputeIntegerHash(compiler::Node* key, compiler::Node* seed);
+
+  template <typename Dictionary>
+  void NumberDictionaryLookup(compiler::Node* dictionary, compiler::Node* key,
+                              Label* if_found, Variable* var_entry,
+                              Label* if_not_found);
+
+  // Tries to check if {object} has own {unique_name} property.
+  void TryHasOwnProperty(compiler::Node* object, compiler::Node* map,
+                         compiler::Node* instance_type,
+                         compiler::Node* unique_name, Label* if_found,
+                         Label* if_not_found, Label* if_bailout);
+
+  // Tries to get {object}'s own {unique_name} property value. If the property
+  // is an accessor then it also calls a getter. If the property is a double
+  // field it re-wraps value in an immutable heap number.
+  void TryGetOwnProperty(compiler::Node* context, compiler::Node* receiver,
+                         compiler::Node* object, compiler::Node* map,
+                         compiler::Node* instance_type,
+                         compiler::Node* unique_name, Label* if_found,
+                         Variable* var_value, Label* if_not_found,
+                         Label* if_bailout);
+
+  void LoadPropertyFromFastObject(compiler::Node* object, compiler::Node* map,
+                                  compiler::Node* descriptors,
+                                  compiler::Node* name_index,
+                                  Variable* var_details, Variable* var_value);
+
+  void LoadPropertyFromNameDictionary(compiler::Node* dictionary,
+                                      compiler::Node* entry,
+                                      Variable* var_details,
+                                      Variable* var_value);
+
+  void LoadPropertyFromGlobalDictionary(compiler::Node* dictionary,
+                                        compiler::Node* entry,
+                                        Variable* var_details,
+                                        Variable* var_value, Label* if_deleted);
+
+  // Generic property lookup generator. If the {object} is fast and
+  // {unique_name} property is found then the control goes to {if_found_fast}
+  // label and {var_meta_storage} and {var_name_index} will contain
+  // DescriptorArray and an index of the descriptor's name respectively.
+  // If the {object} is slow or global then the control goes to {if_found_dict}
+  // or {if_found_global} and the {var_meta_storage} and {var_name_index} will
+  // contain a dictionary and an index of the key field of the found entry.
+  // If property is not found or given lookup is not supported then
+  // the control goes to {if_not_found} or {if_bailout} respectively.
+  //
+  // Note: this code does not check if the global dictionary points to deleted
+  // entry! This has to be done by the caller.
   void TryLookupProperty(compiler::Node* object, compiler::Node* map,
-                         compiler::Node* instance_type, compiler::Node* name,
-                         Label* if_found, Label* if_not_found,
-                         Label* call_runtime);
+                         compiler::Node* instance_type,
+                         compiler::Node* unique_name, Label* if_found_fast,
+                         Label* if_found_dict, Label* if_found_global,
+                         Variable* var_meta_storage, Variable* var_name_index,
+                         Label* if_not_found, Label* if_bailout);
 
   void TryLookupElement(compiler::Node* object, compiler::Node* map,
                         compiler::Node* instance_type, compiler::Node* index,
                         Label* if_found, Label* if_not_found,
-                        Label* call_runtime);
+                        Label* if_bailout);
 
   // Instanceof helpers.
   // ES6 section 7.3.19 OrdinaryHasInstance (C, O)
@@ -241,6 +362,66 @@
                                       compiler::Node* callable,
                                       compiler::Node* object);
 
+  // LoadIC helpers.
+  struct LoadICParameters {
+    LoadICParameters(compiler::Node* context, compiler::Node* receiver,
+                     compiler::Node* name, compiler::Node* slot,
+                     compiler::Node* vector)
+        : context(context),
+          receiver(receiver),
+          name(name),
+          slot(slot),
+          vector(vector) {}
+
+    compiler::Node* context;
+    compiler::Node* receiver;
+    compiler::Node* name;
+    compiler::Node* slot;
+    compiler::Node* vector;
+  };
+
+  // Load type feedback vector from the stub caller's frame.
+  compiler::Node* LoadTypeFeedbackVectorForStub();
+
+  compiler::Node* LoadReceiverMap(compiler::Node* receiver);
+
+  // Checks monomorphic case. Returns {feedback} entry of the vector.
+  compiler::Node* TryMonomorphicCase(const LoadICParameters* p,
+                                     compiler::Node* receiver_map,
+                                     Label* if_handler, Variable* var_handler,
+                                     Label* if_miss);
+  void HandlePolymorphicCase(const LoadICParameters* p,
+                             compiler::Node* receiver_map,
+                             compiler::Node* feedback, Label* if_handler,
+                             Variable* var_handler, Label* if_miss,
+                             int unroll_count);
+
+  compiler::Node* StubCachePrimaryOffset(compiler::Node* name,
+                                         Code::Flags flags,
+                                         compiler::Node* map);
+
+  compiler::Node* StubCacheSecondaryOffset(compiler::Node* name,
+                                           Code::Flags flags,
+                                           compiler::Node* seed);
+
+  // This enum is used here as a replacement for StubCache::Table to avoid
+  // including stub cache header.
+  enum StubCacheTable : int;
+
+  void TryProbeStubCacheTable(StubCache* stub_cache, StubCacheTable table_id,
+                              compiler::Node* entry_offset,
+                              compiler::Node* name, Code::Flags flags,
+                              compiler::Node* map, Label* if_handler,
+                              Variable* var_handler, Label* if_miss);
+
+  void TryProbeStubCache(StubCache* stub_cache, Code::Flags flags,
+                         compiler::Node* receiver, compiler::Node* name,
+                         Label* if_handler, Variable* var_handler,
+                         Label* if_miss);
+
+  void LoadIC(const LoadICParameters* p);
+  void LoadGlobalIC(const LoadICParameters* p);
+
  private:
   compiler::Node* ElementOffsetFromIndex(compiler::Node* index,
                                          ElementsKind kind, ParameterMode mode,
@@ -260,5 +441,4 @@
 
 }  // namespace internal
 }  // namespace v8
-
 #endif  // V8_CODE_STUB_ASSEMBLER_H_
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 6680e66..650e538 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -81,30 +81,14 @@
   HValue* BuildPushElement(HValue* object, HValue* argc,
                            HValue* argument_elements, ElementsKind kind);
 
-  enum ArgumentClass {
-    NONE,
-    SINGLE,
-    MULTIPLE
-  };
-
   HValue* UnmappedCase(HValue* elements, HValue* key, HValue* value);
   HValue* EmitKeyedSloppyArguments(HValue* receiver, HValue* key,
                                    HValue* value);
 
-  HValue* BuildArrayConstructor(ElementsKind kind,
-                                AllocationSiteOverrideMode override_mode,
-                                ArgumentClass argument_class);
-  HValue* BuildInternalArrayConstructor(ElementsKind kind,
-                                        ArgumentClass argument_class);
-
   HValue* BuildToString(HValue* input, bool convert);
   HValue* BuildToPrimitive(HValue* input, HValue* input_map);
 
  private:
-  HValue* BuildArraySingleArgumentConstructor(JSArrayBuilder* builder);
-  HValue* BuildArrayNArgumentsConstructor(JSArrayBuilder* builder,
-                                          ElementsKind kind);
-
   base::SmartArrayPointer<HParameter*> parameters_;
   HValue* arguments_length_;
   CompilationInfo* info_;
@@ -483,9 +467,14 @@
   HValue* closure = GetParameter(0);
   HValue* literal_index = GetParameter(1);
 
+  // TODO(turbofan): This codestub has regressed to need a frame on ia32 at some
+  // point and wasn't caught since it wasn't built in the snapshot. We should
+  // probably just replace with a TurboFan stub rather than fixing it.
+#if !(V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87)
   // This stub is very performance sensitive, the generated code must be tuned
   // so that it doesn't build and eager frame.
   info()->MarkMustNotHaveEagerFrame();
+#endif
 
   HValue* literals_array = Add<HLoadNamedField>(
       closure, nullptr, HObjectAccess::ForLiteralsPointer());
@@ -710,7 +699,7 @@
         can_store.IfNot<HCompareMap>(argument,
                                      isolate()->factory()->heap_number_map());
       }
-      can_store.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+      can_store.ThenDeopt(Deoptimizer::kFastPathFailed);
       can_store.End();
     }
     builder.EndBody();
@@ -761,7 +750,7 @@
     IfBuilder check(this);
     check.If<HCompareNumericAndBranch>(
         bits, Add<HConstant>(1 << Map::kIsExtensible), Token::NE);
-    check.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+    check.ThenDeopt(Deoptimizer::kFastPathFailed);
     check.End();
   }
 
@@ -774,7 +763,7 @@
     HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, bit_field3, mask);
     IfBuilder check(this);
     check.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
-    check.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+    check.ThenDeopt(Deoptimizer::kFastPathFailed);
     check.End();
   }
 
@@ -792,7 +781,7 @@
     HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, details, mask);
     IfBuilder readonly(this);
     readonly.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
-    readonly.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+    readonly.ThenDeopt(Deoptimizer::kFastPathFailed);
     readonly.End();
   }
 
@@ -820,14 +809,14 @@
     check_instance_type.If<HCompareNumericAndBranch>(
         instance_type, Add<HConstant>(LAST_CUSTOM_ELEMENTS_RECEIVER),
         Token::LTE);
-    check_instance_type.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+    check_instance_type.ThenDeopt(Deoptimizer::kFastPathFailed);
     check_instance_type.End();
 
     HValue* elements = Add<HLoadNamedField>(
         prototype, nullptr, HObjectAccess::ForElementsPointer());
     IfBuilder no_elements(this);
     no_elements.IfNot<HCompareObjectEqAndBranch>(elements, empty);
-    no_elements.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+    no_elements.ThenDeopt(Deoptimizer::kFastPathFailed);
     no_elements.End();
 
     environment()->Push(prototype_map);
@@ -877,7 +866,7 @@
                                               FAST_HOLEY_DOUBLE_ELEMENTS);
         environment()->Push(new_length);
       }
-      has_double_elements.ElseDeopt(Deoptimizer::kFastArrayPushFailed);
+      has_double_elements.ElseDeopt(Deoptimizer::kFastPathFailed);
       has_double_elements.End();
     }
     has_object_elements.End();
@@ -890,6 +879,191 @@
 Handle<Code> FastArrayPushStub::GenerateCode() { return DoGenerateCode(this); }
 
 template <>
+HValue* CodeStubGraphBuilder<FastFunctionBindStub>::BuildCodeStub() {
+  // TODO(verwaest): Fix deoptimizer messages.
+  HValue* argc = GetArgumentsLength();
+  HInstruction* argument_elements = Add<HArgumentsElements>(false, false);
+  HInstruction* object = Add<HAccessArgumentsAt>(argument_elements, argc,
+                                                 graph()->GetConstantMinus1());
+  BuildCheckHeapObject(object);
+  HValue* map = Add<HLoadNamedField>(object, nullptr, HObjectAccess::ForMap());
+  Add<HCheckInstanceType>(object, HCheckInstanceType::IS_JS_FUNCTION);
+
+  // Disallow binding of slow-mode functions. We need to figure out whether the
+  // length and name property are in the original state.
+  {
+    HValue* bit_field3 =
+        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField3());
+    HValue* mask = Add<HConstant>(static_cast<int>(Map::DictionaryMap::kMask));
+    HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, bit_field3, mask);
+    IfBuilder check(this);
+    check.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
+    check.ThenDeopt(Deoptimizer::kFastPathFailed);
+    check.End();
+  }
+
+  // Check whether the length and name properties are still present as
+  // AccessorInfo objects. In that case, their value can be recomputed even if
+  // the actual value on the object changes.
+  {
+    HValue* descriptors =
+        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapDescriptors());
+
+    HValue* descriptors_length = Add<HLoadNamedField>(
+        descriptors, nullptr, HObjectAccess::ForFixedArrayLength());
+    IfBuilder range(this);
+    range.If<HCompareNumericAndBranch>(descriptors_length,
+                                       graph()->GetConstant1(), Token::LTE);
+    range.ThenDeopt(Deoptimizer::kFastPathFailed);
+    range.End();
+
+    // Verify .length.
+    const int length_index = JSFunction::kLengthDescriptorIndex;
+    HValue* maybe_length = Add<HLoadKeyed>(
+        descriptors, Add<HConstant>(DescriptorArray::ToKeyIndex(length_index)),
+        nullptr, nullptr, FAST_ELEMENTS);
+    Unique<Name> length_string = Unique<Name>::CreateUninitialized(
+        isolate()->factory()->length_string());
+    Add<HCheckValue>(maybe_length, length_string, false);
+
+    HValue* maybe_length_accessor = Add<HLoadKeyed>(
+        descriptors,
+        Add<HConstant>(DescriptorArray::ToValueIndex(length_index)), nullptr,
+        nullptr, FAST_ELEMENTS);
+    BuildCheckHeapObject(maybe_length_accessor);
+    Add<HCheckMaps>(maybe_length_accessor,
+                    isolate()->factory()->accessor_info_map());
+
+    // Verify .name.
+    const int name_index = JSFunction::kNameDescriptorIndex;
+    HValue* maybe_name = Add<HLoadKeyed>(
+        descriptors, Add<HConstant>(DescriptorArray::ToKeyIndex(name_index)),
+        nullptr, nullptr, FAST_ELEMENTS);
+    Unique<Name> name_string =
+        Unique<Name>::CreateUninitialized(isolate()->factory()->name_string());
+    Add<HCheckValue>(maybe_name, name_string, false);
+
+    HValue* maybe_name_accessor = Add<HLoadKeyed>(
+        descriptors, Add<HConstant>(DescriptorArray::ToValueIndex(name_index)),
+        nullptr, nullptr, FAST_ELEMENTS);
+    BuildCheckHeapObject(maybe_name_accessor);
+    Add<HCheckMaps>(maybe_name_accessor,
+                    isolate()->factory()->accessor_info_map());
+  }
+
+  // Choose the right bound function map based on whether the target is
+  // constructable.
+  {
+    HValue* bit_field =
+        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField());
+    HValue* mask = Add<HConstant>(static_cast<int>(1 << Map::kIsConstructor));
+    HValue* bits = AddUncasted<HBitwise>(Token::BIT_AND, bit_field, mask);
+
+    HValue* native_context = BuildGetNativeContext();
+    IfBuilder is_constructor(this);
+    is_constructor.If<HCompareNumericAndBranch>(bits, mask, Token::EQ);
+    is_constructor.Then();
+    {
+      HValue* map = Add<HLoadNamedField>(
+          native_context, nullptr,
+          HObjectAccess::ForContextSlot(
+              Context::BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX));
+      environment()->Push(map);
+    }
+    is_constructor.Else();
+    {
+      HValue* map = Add<HLoadNamedField>(
+          native_context, nullptr,
+          HObjectAccess::ForContextSlot(
+              Context::BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX));
+      environment()->Push(map);
+    }
+    is_constructor.End();
+  }
+  HValue* bound_function_map = environment()->Pop();
+
+  // Verify that __proto__ matches that of a the target bound function.
+  {
+    HValue* prototype =
+        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForPrototype());
+    HValue* expected_prototype = Add<HLoadNamedField>(
+        bound_function_map, nullptr, HObjectAccess::ForPrototype());
+    IfBuilder equal_prototype(this);
+    equal_prototype.IfNot<HCompareObjectEqAndBranch>(prototype,
+                                                     expected_prototype);
+    equal_prototype.ThenDeopt(Deoptimizer::kFastPathFailed);
+    equal_prototype.End();
+  }
+
+  // Allocate the arguments array.
+  IfBuilder empty_args(this);
+  empty_args.If<HCompareNumericAndBranch>(argc, graph()->GetConstant1(),
+                                          Token::LTE);
+  empty_args.Then();
+  { environment()->Push(Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex)); }
+  empty_args.Else();
+  {
+    HValue* elements_length = AddUncasted<HSub>(argc, graph()->GetConstant1());
+    HValue* elements =
+        BuildAllocateAndInitializeArray(FAST_ELEMENTS, elements_length);
+
+    LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
+    HValue* start = graph()->GetConstant1();
+    HValue* key = builder.BeginBody(start, argc, Token::LT);
+    {
+      HValue* argument = Add<HAccessArgumentsAt>(argument_elements, argc, key);
+      HValue* index = AddUncasted<HSub>(key, graph()->GetConstant1());
+      AddElementAccess(elements, index, argument, elements, nullptr,
+                       FAST_ELEMENTS, STORE);
+    }
+    builder.EndBody();
+    environment()->Push(elements);
+  }
+  empty_args.End();
+  HValue* elements = environment()->Pop();
+
+  // Find the 'this' to bind.
+  IfBuilder no_receiver(this);
+  no_receiver.If<HCompareNumericAndBranch>(argc, graph()->GetConstant0(),
+                                           Token::EQ);
+  no_receiver.Then();
+  { environment()->Push(Add<HLoadRoot>(Heap::kUndefinedValueRootIndex)); }
+  no_receiver.Else();
+  {
+    environment()->Push(Add<HAccessArgumentsAt>(argument_elements, argc,
+                                                graph()->GetConstant0()));
+  }
+  no_receiver.End();
+  HValue* receiver = environment()->Pop();
+
+  // Allocate the resulting bound function.
+  HValue* size = Add<HConstant>(JSBoundFunction::kSize);
+  HValue* bound_function =
+      Add<HAllocate>(size, HType::JSObject(), NOT_TENURED,
+                     JS_BOUND_FUNCTION_TYPE, graph()->GetConstant0());
+  Add<HStoreNamedField>(bound_function, HObjectAccess::ForMap(),
+                        bound_function_map);
+  HValue* empty_fixed_array = Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex);
+  Add<HStoreNamedField>(bound_function, HObjectAccess::ForPropertiesPointer(),
+                        empty_fixed_array);
+  Add<HStoreNamedField>(bound_function, HObjectAccess::ForElementsPointer(),
+                        empty_fixed_array);
+  Add<HStoreNamedField>(bound_function, HObjectAccess::ForBoundTargetFunction(),
+                        object);
+
+  Add<HStoreNamedField>(bound_function, HObjectAccess::ForBoundThis(),
+                        receiver);
+  Add<HStoreNamedField>(bound_function, HObjectAccess::ForBoundArguments(),
+                        elements);
+
+  return bound_function;
+}
+
+Handle<Code> FastFunctionBindStub::GenerateCode() {
+  return DoGenerateCode(this);
+}
+
+template <>
 HValue* CodeStubGraphBuilder<GrowArrayElementsStub>::BuildCodeStub() {
   ElementsKind kind = casted_stub()->elements_kind();
   if (IsFastDoubleElementsKind(kind)) {
@@ -972,18 +1146,6 @@
 
 
 template <>
-HValue* CodeStubGraphBuilder<ArrayBufferViewLoadFieldStub>::BuildCodeStub() {
-  return BuildArrayBufferViewFieldAccessor(GetParameter(0), nullptr,
-                                           casted_stub()->index());
-}
-
-
-Handle<Code> ArrayBufferViewLoadFieldStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
-template <>
 HValue* CodeStubGraphBuilder<LoadConstantStub>::BuildCodeStub() {
   HValue* map = AddLoadMap(GetParameter(0), NULL);
   HObjectAccess descriptors_access = HObjectAccess::ForObservableJSObjectOffset(
@@ -1206,8 +1368,21 @@
 template <>
 HValue* CodeStubGraphBuilder<StoreTransitionStub>::BuildCodeStub() {
   HValue* object = GetParameter(StoreTransitionHelper::ReceiverIndex());
+  HValue* value = GetParameter(StoreTransitionHelper::ValueIndex());
+  StoreTransitionStub::StoreMode store_mode = casted_stub()->store_mode();
 
-  switch (casted_stub()->store_mode()) {
+  if (store_mode != StoreTransitionStub::StoreMapOnly) {
+    value = GetParameter(StoreTransitionHelper::ValueIndex());
+    Representation representation = casted_stub()->representation();
+    if (representation.IsDouble()) {
+      // In case we are storing a double, assure that the value is a double
+      // before manipulating the properties backing store. Otherwise the actual
+      // store may deopt, leaving the backing store in an overallocated state.
+      value = AddUncasted<HForceRepresentation>(value, representation);
+    }
+  }
+
+  switch (store_mode) {
     case StoreTransitionStub::ExtendStorageAndStoreMapAndValue: {
       HValue* properties = Add<HLoadNamedField>(
           object, nullptr, HObjectAccess::ForPropertiesPointer());
@@ -1235,9 +1410,8 @@
     // Fall through.
     case StoreTransitionStub::StoreMapAndValue:
       // Store the new value into the "extended" object.
-      BuildStoreNamedField(
-          object, GetParameter(StoreTransitionHelper::ValueIndex()),
-          casted_stub()->index(), casted_stub()->representation(), true);
+      BuildStoreNamedField(object, value, casted_stub()->index(),
+                           casted_stub()->representation(), true);
     // Fall through.
 
     case StoreTransitionStub::StoreMapOnly:
@@ -1246,7 +1420,7 @@
                             GetParameter(StoreTransitionHelper::MapIndex()));
       break;
   }
-  return GetParameter(StoreTransitionHelper::ValueIndex());
+  return value;
 }
 
 
@@ -1275,15 +1449,61 @@
 
 template <>
 HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
+  ElementsKind const from_kind = casted_stub()->from_kind();
+  ElementsKind const to_kind = casted_stub()->to_kind();
+  HValue* const object = GetParameter(0);
+  HValue* const map = GetParameter(1);
+
+  // The {object} is known to be a JSObject (otherwise it wouldn't have elements
+  // anyways).
+  object->set_type(HType::JSObject());
+
   info()->MarkAsSavesCallerDoubles();
 
-  BuildTransitionElementsKind(GetParameter(0),
-                              GetParameter(1),
-                              casted_stub()->from_kind(),
-                              casted_stub()->to_kind(),
-                              casted_stub()->is_js_array());
+  DCHECK_IMPLIES(IsFastHoleyElementsKind(from_kind),
+                 IsFastHoleyElementsKind(to_kind));
 
-  return GetParameter(0);
+  if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
+    Add<HTrapAllocationMemento>(object);
+  }
+
+  if (!IsSimpleMapChangeTransition(from_kind, to_kind)) {
+    HInstruction* elements = AddLoadElements(object);
+
+    IfBuilder if_objecthaselements(this);
+    if_objecthaselements.IfNot<HCompareObjectEqAndBranch>(
+        elements, Add<HConstant>(isolate()->factory()->empty_fixed_array()));
+    if_objecthaselements.Then();
+    {
+      // Determine the elements capacity.
+      HInstruction* elements_length = AddLoadFixedArrayLength(elements);
+
+      // Determine the effective (array) length.
+      IfBuilder if_objectisarray(this);
+      if_objectisarray.If<HHasInstanceTypeAndBranch>(object, JS_ARRAY_TYPE);
+      if_objectisarray.Then();
+      {
+        // The {object} is a JSArray, load the special "length" property.
+        Push(Add<HLoadNamedField>(object, nullptr,
+                                  HObjectAccess::ForArrayLength(from_kind)));
+      }
+      if_objectisarray.Else();
+      {
+        // The {object} is some other JSObject.
+        Push(elements_length);
+      }
+      if_objectisarray.End();
+      HValue* length = Pop();
+
+      BuildGrowElementsCapacity(object, elements, from_kind, to_kind, length,
+                                elements_length);
+    }
+    if_objecthaselements.End();
+  }
+
+  Add<HStoreNamedField>(object, HObjectAccess::ForMap(), map);
+
+  return object;
 }
 
 
@@ -1291,185 +1511,6 @@
   return DoGenerateCode(this);
 }
 
-HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
-    ElementsKind kind,
-    AllocationSiteOverrideMode override_mode,
-    ArgumentClass argument_class) {
-  HValue* constructor = GetParameter(ArrayConstructorStubBase::kConstructor);
-  HValue* alloc_site = GetParameter(ArrayConstructorStubBase::kAllocationSite);
-  JSArrayBuilder array_builder(this, kind, alloc_site, constructor,
-                               override_mode);
-  HValue* result = NULL;
-  switch (argument_class) {
-    case NONE:
-      // This stub is very performance sensitive, the generated code must be
-      // tuned so that it doesn't build and eager frame.
-      info()->MarkMustNotHaveEagerFrame();
-      result = array_builder.AllocateEmptyArray();
-      break;
-    case SINGLE:
-      result = BuildArraySingleArgumentConstructor(&array_builder);
-      break;
-    case MULTIPLE:
-      result = BuildArrayNArgumentsConstructor(&array_builder, kind);
-      break;
-  }
-
-  return result;
-}
-
-
-HValue* CodeStubGraphBuilderBase::BuildInternalArrayConstructor(
-    ElementsKind kind, ArgumentClass argument_class) {
-  HValue* constructor = GetParameter(
-      InternalArrayConstructorStubBase::kConstructor);
-  JSArrayBuilder array_builder(this, kind, constructor);
-
-  HValue* result = NULL;
-  switch (argument_class) {
-    case NONE:
-      // This stub is very performance sensitive, the generated code must be
-      // tuned so that it doesn't build and eager frame.
-      info()->MarkMustNotHaveEagerFrame();
-      result = array_builder.AllocateEmptyArray();
-      break;
-    case SINGLE:
-      result = BuildArraySingleArgumentConstructor(&array_builder);
-      break;
-    case MULTIPLE:
-      result = BuildArrayNArgumentsConstructor(&array_builder, kind);
-      break;
-  }
-  return result;
-}
-
-
-HValue* CodeStubGraphBuilderBase::BuildArraySingleArgumentConstructor(
-    JSArrayBuilder* array_builder) {
-  // Smi check and range check on the input arg.
-  HValue* constant_one = graph()->GetConstant1();
-  HValue* constant_zero = graph()->GetConstant0();
-
-  HInstruction* elements = Add<HArgumentsElements>(false);
-  HInstruction* argument = Add<HAccessArgumentsAt>(
-      elements, constant_one, constant_zero);
-
-  return BuildAllocateArrayFromLength(array_builder, argument);
-}
-
-
-HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
-    JSArrayBuilder* array_builder, ElementsKind kind) {
-  // Insert a bounds check because the number of arguments might exceed
-  // the kInitialMaxFastElementArray limit. This cannot happen for code
-  // that was parsed, but calling via Array.apply(thisArg, [...]) might
-  // trigger it.
-  HValue* length = GetArgumentsLength();
-  HConstant* max_alloc_length =
-      Add<HConstant>(JSArray::kInitialMaxFastElementArray);
-  HValue* checked_length = Add<HBoundsCheck>(length, max_alloc_length);
-
-  // We need to fill with the hole if it's a smi array in the multi-argument
-  // case because we might have to bail out while copying arguments into
-  // the array because they aren't compatible with a smi array.
-  // If it's a double array, no problem, and if it's fast then no
-  // problem either because doubles are boxed.
-  //
-  // TODO(mvstanton): consider an instruction to memset fill the array
-  // with zero in this case instead.
-  JSArrayBuilder::FillMode fill_mode = IsFastSmiElementsKind(kind)
-      ? JSArrayBuilder::FILL_WITH_HOLE
-      : JSArrayBuilder::DONT_FILL_WITH_HOLE;
-  HValue* new_object = array_builder->AllocateArray(checked_length,
-                                                    checked_length,
-                                                    fill_mode);
-  HValue* elements = array_builder->GetElementsLocation();
-  DCHECK(elements != NULL);
-
-  // Now populate the elements correctly.
-  LoopBuilder builder(this,
-                      context(),
-                      LoopBuilder::kPostIncrement);
-  HValue* start = graph()->GetConstant0();
-  HValue* key = builder.BeginBody(start, checked_length, Token::LT);
-  HInstruction* argument_elements = Add<HArgumentsElements>(false);
-  HInstruction* argument = Add<HAccessArgumentsAt>(
-      argument_elements, checked_length, key);
-
-  Add<HStoreKeyed>(elements, key, argument, nullptr, kind);
-  builder.EndBody();
-  return new_object;
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
-  ElementsKind kind = casted_stub()->elements_kind();
-  AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
-  return BuildArrayConstructor(kind, override_mode, NONE);
-}
-
-template <>
-HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
-    BuildCodeStub() {
-  ElementsKind kind = casted_stub()->elements_kind();
-  AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
-  return BuildArrayConstructor(kind, override_mode, SINGLE);
-}
-
-
-Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
-  ElementsKind kind = casted_stub()->elements_kind();
-  AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
-  return BuildArrayConstructor(kind, override_mode, MULTIPLE);
-}
-
-
-Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<InternalArrayNoArgumentConstructorStub>::
-    BuildCodeStub() {
-  ElementsKind kind = casted_stub()->elements_kind();
-  return BuildInternalArrayConstructor(kind, NONE);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<InternalArraySingleArgumentConstructorStub>::
-    BuildCodeStub() {
-  ElementsKind kind = casted_stub()->elements_kind();
-  return BuildInternalArrayConstructor(kind, SINGLE);
-}
-
-
-Handle<Code> InternalArraySingleArgumentConstructorStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<InternalArrayNArgumentsConstructorStub>::
-    BuildCodeStub() {
-  ElementsKind kind = casted_stub()->elements_kind();
-  return BuildInternalArrayConstructor(kind, MULTIPLE);
-}
-
-
-Handle<Code> InternalArrayNArgumentsConstructorStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
 template <>
 HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
   BinaryOpICState state = casted_stub()->state();
@@ -1890,6 +1931,8 @@
   Factory* factory = isolate()->factory();
   HInstruction* empty_fixed_array =
       Add<HConstant>(factory->empty_fixed_array());
+  HInstruction* empty_literals_array =
+      Add<HConstant>(factory->empty_literals_array());
   HValue* shared_info = GetParameter(0);
 
   AddIncrementCounter(counters->fast_new_closure_total());
@@ -1916,7 +1959,7 @@
   Add<HStoreNamedField>(js_function, HObjectAccess::ForElementsPointer(),
                         empty_fixed_array);
   Add<HStoreNamedField>(js_function, HObjectAccess::ForLiteralsPointer(),
-                        empty_fixed_array);
+                        empty_literals_array);
   Add<HStoreNamedField>(js_function, HObjectAccess::ForPrototypeOrInitialMap(),
                         graph()->GetConstantHole());
   Add<HStoreNamedField>(
@@ -2023,7 +2066,12 @@
   HValue* index = GetParameter(RegExpConstructResultStub::kIndex);
   HValue* input = GetParameter(RegExpConstructResultStub::kInput);
 
+  // TODO(turbofan): This codestub has regressed to need a frame on ia32 at some
+  // point and wasn't caught since it wasn't built in the snapshot. We should
+  // probably just replace with a TurboFan stub rather than fixing it.
+#if !(V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87)
   info()->MarkMustNotHaveEagerFrame();
+#endif
 
   return BuildRegExpConstructResult(length, index, input);
 }
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index d7ea506..ae3adb7 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -15,7 +15,6 @@
 #include "src/ic/ic.h"
 #include "src/macro-assembler.h"
 #include "src/parsing/parser.h"
-#include "src/profiler/cpu-profiler.h"
 
 namespace v8 {
 namespace internal {
@@ -83,8 +82,8 @@
   std::ostringstream os;
   os << *this;
   PROFILE(isolate(),
-          CodeCreateEvent(Logger::STUB_TAG, AbstractCode::cast(*code),
-                          os.str().c_str()));
+          CodeCreateEvent(CodeEventListener::STUB_TAG,
+                          AbstractCode::cast(*code), os.str().c_str()));
   Counters* counters = isolate()->counters();
   counters->total_stubs_code_size()->Increment(code->instruction_size());
 #ifdef DEBUG
@@ -99,7 +98,7 @@
 
 
 Code::Flags CodeStub::GetCodeFlags() const {
-  return Code::ComputeFlags(GetCodeKind(), GetICState(), GetExtraICState());
+  return Code::ComputeFlags(GetCodeKind(), GetExtraICState());
 }
 
 
@@ -134,8 +133,7 @@
   CodeDesc desc;
   masm.GetCode(&desc);
   // Copy the generated code into a heap object.
-  Code::Flags flags =
-      Code::ComputeFlags(GetCodeKind(), GetICState(), GetExtraICState());
+  Code::Flags flags = Code::ComputeFlags(GetCodeKind(), GetExtraICState());
   Handle<Code> new_object = factory->NewCode(
       desc, flags, masm.CodeObject(), NeedsImmovableCode());
   return new_object;
@@ -373,40 +371,6 @@
 }
 
 
-void CompareICStub::AddToSpecialCache(Handle<Code> new_object) {
-  DCHECK(*known_map_ != NULL);
-  Isolate* isolate = new_object->GetIsolate();
-  Factory* factory = isolate->factory();
-  return Map::UpdateCodeCache(known_map_,
-                              strict() ?
-                                  factory->strict_compare_ic_string() :
-                                  factory->compare_ic_string(),
-                              new_object);
-}
-
-
-bool CompareICStub::FindCodeInSpecialCache(Code** code_out) {
-  Code::Flags flags = Code::ComputeFlags(
-      GetCodeKind(),
-      UNINITIALIZED);
-  Name* name = strict() ? isolate()->heap()->strict_compare_ic_string()
-                        : isolate()->heap()->compare_ic_string();
-  Code* code = known_map_->LookupInCodeCache(name, flags);
-  if (code != nullptr) {
-    *code_out = code;
-#ifdef DEBUG
-    CompareICStub decode((*code_out)->stub_key(), isolate());
-    DCHECK(op() == decode.op());
-    DCHECK(left() == decode.left());
-    DCHECK(right() == decode.right());
-    DCHECK(state() == decode.state());
-#endif
-    return true;
-  }
-  return false;
-}
-
-
 void CompareICStub::Generate(MacroAssembler* masm) {
   switch (state()) {
     case CompareICState::UNINITIALIZED:
@@ -443,7 +407,6 @@
   }
 }
 
-
 Handle<Code> TurboFanCodeStub::GenerateCode() {
   const char* name = CodeStub::MajorName(MajorKey());
   Zone zone(isolate()->allocator());
@@ -454,6 +417,58 @@
   return assembler.GenerateCode();
 }
 
+void LoadICTrampolineTFStub::GenerateAssembly(
+    CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* name = assembler->Parameter(1);
+  Node* slot = assembler->Parameter(2);
+  Node* context = assembler->Parameter(3);
+  Node* vector = assembler->LoadTypeFeedbackVectorForStub();
+
+  CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
+  assembler->LoadIC(&p);
+}
+
+void LoadICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* name = assembler->Parameter(1);
+  Node* slot = assembler->Parameter(2);
+  Node* vector = assembler->Parameter(3);
+  Node* context = assembler->Parameter(4);
+
+  CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
+  assembler->LoadIC(&p);
+}
+
+void LoadGlobalICTrampolineStub::GenerateAssembly(
+    CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+
+  Node* slot = assembler->Parameter(0);
+  Node* context = assembler->Parameter(1);
+  Node* vector = assembler->LoadTypeFeedbackVectorForStub();
+
+  CodeStubAssembler::LoadICParameters p(context, nullptr, nullptr, slot,
+                                        vector);
+  assembler->LoadGlobalIC(&p);
+}
+
+void LoadGlobalICStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+
+  Node* slot = assembler->Parameter(0);
+  Node* vector = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  CodeStubAssembler::LoadICParameters p(context, nullptr, nullptr, slot,
+                                        vector);
+  assembler->LoadGlobalIC(&p);
+}
+
 void AllocateHeapNumberStub::GenerateAssembly(
     CodeStubAssembler* assembler) const {
   typedef compiler::Node Node;
@@ -1827,15 +1842,16 @@
   return result_var.value();
 }
 
-void InstanceOfStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+// static
+compiler::Node* InstanceOfStub::Generate(CodeStubAssembler* assembler,
+                                         compiler::Node* object,
+                                         compiler::Node* callable,
+                                         compiler::Node* context) {
   typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
 
-  Node* object = assembler->Parameter(0);
-  Node* callable = assembler->Parameter(1);
-  Node* context = assembler->Parameter(2);
-
-  Label return_runtime(assembler, Label::kDeferred);
+  Label return_runtime(assembler, Label::kDeferred), end(assembler);
+  Variable result(assembler, MachineRepresentation::kTagged);
 
   // Check if no one installed @@hasInstance somewhere.
   assembler->GotoUnless(
@@ -1857,11 +1873,19 @@
       &return_runtime);
 
   // Use the inline OrdinaryHasInstance directly.
-  assembler->Return(assembler->OrdinaryHasInstance(context, callable, object));
+  result.Bind(assembler->OrdinaryHasInstance(context, callable, object));
+  assembler->Goto(&end);
 
   // TODO(bmeurer): Use GetPropertyStub here once available.
   assembler->Bind(&return_runtime);
-  assembler->TailCallRuntime(Runtime::kInstanceOf, context, object, callable);
+  {
+    result.Bind(assembler->CallRuntime(Runtime::kInstanceOf, context, object,
+                                       callable));
+    assembler->Goto(&end);
+  }
+
+  assembler->Bind(&end);
+  return result.value();
 }
 
 namespace {
@@ -1873,15 +1897,15 @@
   kGreaterThanOrEqual
 };
 
-void GenerateAbstractRelationalComparison(CodeStubAssembler* assembler,
-                                          RelationalComparisonMode mode) {
+compiler::Node* GenerateAbstractRelationalComparison(
+    CodeStubAssembler* assembler, RelationalComparisonMode mode,
+    compiler::Node* lhs, compiler::Node* rhs, compiler::Node* context) {
   typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
   typedef CodeStubAssembler::Variable Variable;
 
-  Node* context = assembler->Parameter(2);
-
-  Label return_true(assembler), return_false(assembler);
+  Label return_true(assembler), return_false(assembler), end(assembler);
+  Variable result(assembler, MachineRepresentation::kTagged);
 
   // Shared entry for floating point comparison.
   Label do_fcmp(assembler);
@@ -1894,14 +1918,14 @@
       var_rhs(assembler, MachineRepresentation::kTagged);
   Variable* loop_vars[2] = {&var_lhs, &var_rhs};
   Label loop(assembler, 2, loop_vars);
-  var_lhs.Bind(assembler->Parameter(0));
-  var_rhs.Bind(assembler->Parameter(1));
+  var_lhs.Bind(lhs);
+  var_rhs.Bind(rhs);
   assembler->Goto(&loop);
   assembler->Bind(&loop);
   {
     // Load the current {lhs} and {rhs} values.
-    Node* lhs = var_lhs.value();
-    Node* rhs = var_rhs.value();
+    lhs = var_lhs.value();
+    rhs = var_rhs.value();
 
     // Check if the {lhs} is a Smi or a HeapObject.
     Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
@@ -2074,7 +2098,7 @@
             Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
 
             // Check if {rhs} is also a String.
-            Label if_rhsisstring(assembler),
+            Label if_rhsisstring(assembler, Label::kDeferred),
                 if_rhsisnotstring(assembler, Label::kDeferred);
             assembler->Branch(assembler->Int32LessThan(
                                   rhs_instance_type, assembler->Int32Constant(
@@ -2086,24 +2110,29 @@
               // Both {lhs} and {rhs} are strings.
               switch (mode) {
                 case kLessThan:
-                  assembler->TailCallStub(
+                  result.Bind(assembler->CallStub(
                       CodeFactory::StringLessThan(assembler->isolate()),
-                      context, lhs, rhs);
+                      context, lhs, rhs));
+                  assembler->Goto(&end);
                   break;
                 case kLessThanOrEqual:
-                  assembler->TailCallStub(
+                  result.Bind(assembler->CallStub(
                       CodeFactory::StringLessThanOrEqual(assembler->isolate()),
-                      context, lhs, rhs);
+                      context, lhs, rhs));
+                  assembler->Goto(&end);
                   break;
                 case kGreaterThan:
-                  assembler->TailCallStub(
+                  result.Bind(assembler->CallStub(
                       CodeFactory::StringGreaterThan(assembler->isolate()),
-                      context, lhs, rhs);
+                      context, lhs, rhs));
+                  assembler->Goto(&end);
                   break;
                 case kGreaterThanOrEqual:
-                  assembler->TailCallStub(CodeFactory::StringGreaterThanOrEqual(
+                  result.Bind(
+                      assembler->CallStub(CodeFactory::StringGreaterThanOrEqual(
                                               assembler->isolate()),
-                                          context, lhs, rhs);
+                                          context, lhs, rhs));
+                  assembler->Goto(&end);
                   break;
               }
             }
@@ -2208,10 +2237,19 @@
   }
 
   assembler->Bind(&return_true);
-  assembler->Return(assembler->BooleanConstant(true));
+  {
+    result.Bind(assembler->BooleanConstant(true));
+    assembler->Goto(&end);
+  }
 
   assembler->Bind(&return_false);
-  assembler->Return(assembler->BooleanConstant(false));
+  {
+    result.Bind(assembler->BooleanConstant(false));
+    assembler->Goto(&end);
+  }
+
+  assembler->Bind(&end);
+  return result.value();
 }
 
 enum ResultMode { kDontNegateResult, kNegateResult };
@@ -2340,7 +2378,9 @@
 }
 
 // ES6 section 7.2.12 Abstract Equality Comparison
-void GenerateEqual(CodeStubAssembler* assembler, ResultMode mode) {
+compiler::Node* GenerateEqual(CodeStubAssembler* assembler, ResultMode mode,
+                              compiler::Node* lhs, compiler::Node* rhs,
+                              compiler::Node* context) {
   // This is a slightly optimized version of Object::Equals represented as
   // scheduled TurboFan graph utilizing the CodeStubAssembler. Whenever you
   // change something functionality wise in here, remember to update the
@@ -2349,9 +2389,9 @@
   typedef compiler::Node Node;
   typedef CodeStubAssembler::Variable Variable;
 
-  Node* context = assembler->Parameter(2);
-
-  Label if_equal(assembler), if_notequal(assembler);
+  Label if_equal(assembler), if_notequal(assembler),
+      do_rhsstringtonumber(assembler, Label::kDeferred), end(assembler);
+  Variable result(assembler, MachineRepresentation::kTagged);
 
   // Shared entry for floating point comparison.
   Label do_fcmp(assembler);
@@ -2364,14 +2404,14 @@
       var_rhs(assembler, MachineRepresentation::kTagged);
   Variable* loop_vars[2] = {&var_lhs, &var_rhs};
   Label loop(assembler, 2, loop_vars);
-  var_lhs.Bind(assembler->Parameter(0));
-  var_rhs.Bind(assembler->Parameter(1));
+  var_lhs.Bind(lhs);
+  var_rhs.Bind(rhs);
   assembler->Goto(&loop);
   assembler->Bind(&loop);
   {
     // Load the current {lhs} and {rhs} values.
-    Node* lhs = var_lhs.value();
-    Node* rhs = var_rhs.value();
+    lhs = var_lhs.value();
+    rhs = var_rhs.value();
 
     // Check if {lhs} and {rhs} refer to the same object.
     Label if_same(assembler), if_notsame(assembler);
@@ -2399,6 +2439,8 @@
                           &if_rhsisnotsmi);
 
         assembler->Bind(&if_rhsissmi);
+        // We have already checked for {lhs} and {rhs} being the same value, so
+        // if both are Smis when we get here they must not be equal.
         assembler->Goto(&if_notequal);
 
         assembler->Bind(&if_rhsisnotsmi);
@@ -2408,8 +2450,7 @@
 
           // Check if {rhs} is a HeapNumber.
           Node* number_map = assembler->HeapNumberMapConstant();
-          Label if_rhsisnumber(assembler),
-              if_rhsisnotnumber(assembler, Label::kDeferred);
+          Label if_rhsisnumber(assembler), if_rhsisnotnumber(assembler);
           assembler->Branch(assembler->WordEqual(rhs_map, number_map),
                             &if_rhsisnumber, &if_rhsisnotnumber);
 
@@ -2429,7 +2470,7 @@
 
             // Check if the {rhs} is a String.
             Label if_rhsisstring(assembler, Label::kDeferred),
-                if_rhsisnotstring(assembler, Label::kDeferred);
+                if_rhsisnotstring(assembler);
             assembler->Branch(assembler->Int32LessThan(
                                   rhs_instance_type, assembler->Int32Constant(
                                                          FIRST_NONSTRING_TYPE)),
@@ -2437,19 +2478,17 @@
 
             assembler->Bind(&if_rhsisstring);
             {
-              // Convert the {rhs} to a Number.
-              Callable callable =
-                  CodeFactory::StringToNumber(assembler->isolate());
-              var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-              assembler->Goto(&loop);
+              // The {rhs} is a String and the {lhs} is a Smi; we need
+              // to convert the {rhs} to a Number and compare the output to
+              // the Number on the {lhs}.
+              assembler->Goto(&do_rhsstringtonumber);
             }
 
             assembler->Bind(&if_rhsisnotstring);
             {
               // Check if the {rhs} is a Boolean.
               Node* boolean_map = assembler->BooleanMapConstant();
-              Label if_rhsisboolean(assembler, Label::kDeferred),
-                  if_rhsisnotboolean(assembler, Label::kDeferred);
+              Label if_rhsisboolean(assembler), if_rhsisnotboolean(assembler);
               assembler->Branch(assembler->WordEqual(rhs_map, boolean_map),
                                 &if_rhsisboolean, &if_rhsisnotboolean);
 
@@ -2466,7 +2505,7 @@
                 // Check if the {rhs} is a Receiver.
                 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
                 Label if_rhsisreceiver(assembler, Label::kDeferred),
-                    if_rhsisnotreceiver(assembler, Label::kDeferred);
+                    if_rhsisnotreceiver(assembler);
                 assembler->Branch(
                     assembler->Int32LessThanOrEqual(
                         assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
@@ -2550,8 +2589,8 @@
           assembler->Bind(&if_lhsisstring);
           {
             // Check if {rhs} is also a String.
-            Label if_rhsisstring(assembler),
-                if_rhsisnotstring(assembler, Label::kDeferred);
+            Label if_rhsisstring(assembler, Label::kDeferred),
+                if_rhsisnotstring(assembler);
             assembler->Branch(assembler->Int32LessThan(
                                   rhs_instance_type, assembler->Int32Constant(
                                                          FIRST_NONSTRING_TYPE)),
@@ -2565,7 +2604,8 @@
                   (mode == kDontNegateResult)
                       ? CodeFactory::StringEqual(assembler->isolate())
                       : CodeFactory::StringNotEqual(assembler->isolate());
-              assembler->TailCallStub(callable, context, lhs, rhs);
+              result.Bind(assembler->CallStub(callable, context, lhs, rhs));
+              assembler->Goto(&end);
             }
 
             assembler->Bind(&if_rhsisnotstring);
@@ -2583,8 +2623,7 @@
           assembler->Bind(&if_lhsisnumber);
           {
             // Check if {rhs} is also a HeapNumber.
-            Label if_rhsisnumber(assembler),
-                if_rhsisnotnumber(assembler, Label::kDeferred);
+            Label if_rhsisnumber(assembler), if_rhsisnotnumber(assembler);
             assembler->Branch(
                 assembler->Word32Equal(lhs_instance_type, rhs_instance_type),
                 &if_rhsisnumber, &if_rhsisnotnumber);
@@ -2614,16 +2653,13 @@
                 // The {rhs} is a String and the {lhs} is a HeapNumber; we need
                 // to convert the {rhs} to a Number and compare the output to
                 // the Number on the {lhs}.
-                Callable callable =
-                    CodeFactory::StringToNumber(assembler->isolate());
-                var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-                assembler->Goto(&loop);
+                assembler->Goto(&do_rhsstringtonumber);
               }
 
               assembler->Bind(&if_rhsisnotstring);
               {
                 // Check if the {rhs} is a JSReceiver.
-                Label if_rhsisreceiver(assembler, Label::kDeferred),
+                Label if_rhsisreceiver(assembler),
                     if_rhsisnotreceiver(assembler);
                 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
                 assembler->Branch(
@@ -2715,8 +2751,7 @@
           assembler->Bind(&if_lhsissymbol);
           {
             // Check if the {rhs} is a JSReceiver.
-            Label if_rhsisreceiver(assembler, Label::kDeferred),
-                if_rhsisnotreceiver(assembler);
+            Label if_rhsisreceiver(assembler), if_rhsisnotreceiver(assembler);
             STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
             assembler->Branch(
                 assembler->Int32LessThanOrEqual(
@@ -2763,8 +2798,7 @@
             assembler->Bind(&if_rhsisnotsimd128value);
             {
               // Check if the {rhs} is a JSReceiver.
-              Label if_rhsisreceiver(assembler, Label::kDeferred),
-                  if_rhsisnotreceiver(assembler);
+              Label if_rhsisreceiver(assembler), if_rhsisnotreceiver(assembler);
               STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
               assembler->Branch(
                   assembler->Int32LessThanOrEqual(
@@ -2849,6 +2883,13 @@
         }
       }
     }
+
+    assembler->Bind(&do_rhsstringtonumber);
+    {
+      Callable callable = CodeFactory::StringToNumber(assembler->isolate());
+      var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+      assembler->Goto(&loop);
+    }
   }
 
   assembler->Bind(&do_fcmp);
@@ -2862,13 +2903,25 @@
   }
 
   assembler->Bind(&if_equal);
-  assembler->Return(assembler->BooleanConstant(mode == kDontNegateResult));
+  {
+    result.Bind(assembler->BooleanConstant(mode == kDontNegateResult));
+    assembler->Goto(&end);
+  }
 
   assembler->Bind(&if_notequal);
-  assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
+  {
+    result.Bind(assembler->BooleanConstant(mode == kNegateResult));
+    assembler->Goto(&end);
+  }
+
+  assembler->Bind(&end);
+  return result.value();
 }
 
-void GenerateStrictEqual(CodeStubAssembler* assembler, ResultMode mode) {
+compiler::Node* GenerateStrictEqual(CodeStubAssembler* assembler,
+                                    ResultMode mode, compiler::Node* lhs,
+                                    compiler::Node* rhs,
+                                    compiler::Node* context) {
   // Here's pseudo-code for the algorithm below in case of kDontNegateResult
   // mode; for kNegateResult mode we properly negate the result.
   //
@@ -2918,13 +2971,11 @@
   // }
 
   typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
   typedef compiler::Node Node;
 
-  Node* lhs = assembler->Parameter(0);
-  Node* rhs = assembler->Parameter(1);
-  Node* context = assembler->Parameter(2);
-
-  Label if_equal(assembler), if_notequal(assembler);
+  Label if_equal(assembler), if_notequal(assembler), end(assembler);
+  Variable result(assembler, MachineRepresentation::kTagged);
 
   // Check if {lhs} and {rhs} refer to the same object.
   Label if_same(assembler), if_notsame(assembler);
@@ -3029,7 +3080,8 @@
             Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
 
             // Check if {rhs} is also a String.
-            Label if_rhsisstring(assembler), if_rhsisnotstring(assembler);
+            Label if_rhsisstring(assembler, Label::kDeferred),
+                if_rhsisnotstring(assembler);
             assembler->Branch(assembler->Int32LessThan(
                                   rhs_instance_type, assembler->Int32Constant(
                                                          FIRST_NONSTRING_TYPE)),
@@ -3041,7 +3093,8 @@
                   (mode == kDontNegateResult)
                       ? CodeFactory::StringEqual(assembler->isolate())
                       : CodeFactory::StringNotEqual(assembler->isolate());
-              assembler->TailCallStub(callable, context, lhs, rhs);
+              result.Bind(assembler->CallStub(callable, context, lhs, rhs));
+              assembler->Goto(&end);
             }
 
             assembler->Bind(&if_rhsisnotstring);
@@ -3118,10 +3171,19 @@
   }
 
   assembler->Bind(&if_equal);
-  assembler->Return(assembler->BooleanConstant(mode == kDontNegateResult));
+  {
+    result.Bind(assembler->BooleanConstant(mode == kDontNegateResult));
+    assembler->Goto(&end);
+  }
 
   assembler->Bind(&if_notequal);
-  assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
+  {
+    result.Bind(assembler->BooleanConstant(mode == kNegateResult));
+    assembler->Goto(&end);
+  }
+
+  assembler->Bind(&end);
+  return result.value();
 }
 
 void GenerateStringRelationalComparison(CodeStubAssembler* assembler,
@@ -3501,37 +3563,69 @@
                           holder, callback);
 }
 
-void LessThanStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  GenerateAbstractRelationalComparison(assembler, kLessThan);
+// static
+compiler::Node* LessThanStub::Generate(CodeStubAssembler* assembler,
+                                       compiler::Node* lhs, compiler::Node* rhs,
+                                       compiler::Node* context) {
+  return GenerateAbstractRelationalComparison(assembler, kLessThan, lhs, rhs,
+                                              context);
 }
 
-void LessThanOrEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  GenerateAbstractRelationalComparison(assembler, kLessThanOrEqual);
+// static
+compiler::Node* LessThanOrEqualStub::Generate(CodeStubAssembler* assembler,
+                                              compiler::Node* lhs,
+                                              compiler::Node* rhs,
+                                              compiler::Node* context) {
+  return GenerateAbstractRelationalComparison(assembler, kLessThanOrEqual, lhs,
+                                              rhs, context);
 }
 
-void GreaterThanStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  GenerateAbstractRelationalComparison(assembler, kGreaterThan);
+// static
+compiler::Node* GreaterThanStub::Generate(CodeStubAssembler* assembler,
+                                          compiler::Node* lhs,
+                                          compiler::Node* rhs,
+                                          compiler::Node* context) {
+  return GenerateAbstractRelationalComparison(assembler, kGreaterThan, lhs, rhs,
+                                              context);
 }
 
-void GreaterThanOrEqualStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
-  GenerateAbstractRelationalComparison(assembler, kGreaterThanOrEqual);
+// static
+compiler::Node* GreaterThanOrEqualStub::Generate(CodeStubAssembler* assembler,
+                                                 compiler::Node* lhs,
+                                                 compiler::Node* rhs,
+                                                 compiler::Node* context) {
+  return GenerateAbstractRelationalComparison(assembler, kGreaterThanOrEqual,
+                                              lhs, rhs, context);
 }
 
-void EqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  GenerateEqual(assembler, kDontNegateResult);
+// static
+compiler::Node* EqualStub::Generate(CodeStubAssembler* assembler,
+                                    compiler::Node* lhs, compiler::Node* rhs,
+                                    compiler::Node* context) {
+  return GenerateEqual(assembler, kDontNegateResult, lhs, rhs, context);
 }
 
-void NotEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  GenerateEqual(assembler, kNegateResult);
+// static
+compiler::Node* NotEqualStub::Generate(CodeStubAssembler* assembler,
+                                       compiler::Node* lhs, compiler::Node* rhs,
+                                       compiler::Node* context) {
+  return GenerateEqual(assembler, kNegateResult, lhs, rhs, context);
 }
 
-void StrictEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  GenerateStrictEqual(assembler, kDontNegateResult);
+// static
+compiler::Node* StrictEqualStub::Generate(CodeStubAssembler* assembler,
+                                          compiler::Node* lhs,
+                                          compiler::Node* rhs,
+                                          compiler::Node* context) {
+  return GenerateStrictEqual(assembler, kDontNegateResult, lhs, rhs, context);
 }
 
-void StrictNotEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  GenerateStrictEqual(assembler, kNegateResult);
+// static
+compiler::Node* StrictNotEqualStub::Generate(CodeStubAssembler* assembler,
+                                             compiler::Node* lhs,
+                                             compiler::Node* rhs,
+                                             compiler::Node* context) {
+  return GenerateStrictEqual(assembler, kNegateResult, lhs, rhs, context);
 }
 
 void StringEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
@@ -3637,13 +3731,17 @@
   }
 }
 
-void ToBooleanStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+// static
+compiler::Node* ToBooleanStub::Generate(CodeStubAssembler* assembler,
+                                        compiler::Node* value,
+                                        compiler::Node* context) {
   typedef compiler::Node Node;
   typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
 
-  Node* value = assembler->Parameter(0);
+  Variable result(assembler, MachineRepresentation::kTagged);
   Label if_valueissmi(assembler), if_valueisnotsmi(assembler),
-      return_true(assembler), return_false(assembler);
+      return_true(assembler), return_false(assembler), end(assembler);
 
   // Check if {value} is a Smi or a HeapObject.
   assembler->Branch(assembler->WordIsSmi(value), &if_valueissmi,
@@ -3721,7 +3819,8 @@
       // The {value} is an Oddball, and every Oddball knows its boolean value.
       Node* value_toboolean =
           assembler->LoadObjectField(value, Oddball::kToBooleanOffset);
-      assembler->Return(value_toboolean);
+      result.Bind(value_toboolean);
+      assembler->Goto(&end);
     }
 
     assembler->Bind(&if_valueisother);
@@ -3739,11 +3838,21 @@
                         &return_true, &return_false);
     }
   }
+
   assembler->Bind(&return_false);
-  assembler->Return(assembler->BooleanConstant(false));
+  {
+    result.Bind(assembler->BooleanConstant(false));
+    assembler->Goto(&end);
+  }
 
   assembler->Bind(&return_true);
-  assembler->Return(assembler->BooleanConstant(true));
+  {
+    result.Bind(assembler->BooleanConstant(true));
+    assembler->Goto(&end);
+  }
+
+  assembler->Bind(&end);
+  return result.value();
 }
 
 void ToIntegerStub::GenerateAssembly(CodeStubAssembler* assembler) const {
@@ -4183,17 +4292,17 @@
   stub.GetCode();
 }
 
-void HasPropertyStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+// static
+compiler::Node* HasPropertyStub::Generate(CodeStubAssembler* assembler,
+                                          compiler::Node* key,
+                                          compiler::Node* object,
+                                          compiler::Node* context) {
   typedef compiler::Node Node;
   typedef CodeStubAssembler::Label Label;
   typedef CodeStubAssembler::Variable Variable;
 
-  Node* key = assembler->Parameter(0);
-  Node* object = assembler->Parameter(1);
-  Node* context = assembler->Parameter(2);
-
-  Label call_runtime(assembler), return_true(assembler),
-      return_false(assembler);
+  Label call_runtime(assembler, Label::kDeferred), return_true(assembler),
+      return_false(assembler), end(assembler);
 
   // Ensure object is JSReceiver, otherwise call runtime to throw error.
   Label if_objectisnotsmi(assembler);
@@ -4234,7 +4343,7 @@
     assembler->Bind(&loop);
     {
       Label next_proto(assembler);
-      assembler->TryLookupProperty(var_object.value(), var_map.value(),
+      assembler->TryHasOwnProperty(var_object.value(), var_map.value(),
                                    var_instance_type.value(), key, &return_true,
                                    &next_proto, &call_runtime);
       assembler->Bind(&next_proto);
@@ -4291,14 +4400,29 @@
       assembler->Goto(&loop);
     }
   }
+
+  Variable result(assembler, MachineRepresentation::kTagged);
   assembler->Bind(&return_true);
-  assembler->Return(assembler->BooleanConstant(true));
+  {
+    result.Bind(assembler->BooleanConstant(true));
+    assembler->Goto(&end);
+  }
 
   assembler->Bind(&return_false);
-  assembler->Return(assembler->BooleanConstant(false));
+  {
+    result.Bind(assembler->BooleanConstant(false));
+    assembler->Goto(&end);
+  }
 
   assembler->Bind(&call_runtime);
-  assembler->TailCallRuntime(Runtime::kHasProperty, context, key, object);
+  {
+    result.Bind(
+        assembler->CallRuntime(Runtime::kHasProperty, context, key, object));
+    assembler->Goto(&end);
+  }
+
+  assembler->Bind(&end);
+  return result.value();
 }
 
 void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
@@ -4354,20 +4478,10 @@
 }
 
 
-std::ostream& ArrayConstructorStubBase::BasePrintName(
-    std::ostream& os,  // NOLINT
-    const char* name) const {
-  os << name << "_" << ElementsKindToString(elements_kind());
-  if (override_mode() == DISABLE_ALLOCATION_SITES) {
-    os << "_DISABLE_ALLOCATION_SITES";
-  }
-  return os;
-}
-
 bool ToBooleanICStub::UpdateStatus(Handle<Object> object) {
   Types new_types = types();
   Types old_types = new_types;
-  bool to_boolean_value = new_types.UpdateStatus(object);
+  bool to_boolean_value = new_types.UpdateStatus(isolate(), object);
   TraceTransition(old_types, new_types);
   set_sub_minor_key(TypesBits::update(sub_minor_key(), new_types.ToIntegral()));
   return to_boolean_value;
@@ -4393,14 +4507,15 @@
   return os << ")";
 }
 
-bool ToBooleanICStub::Types::UpdateStatus(Handle<Object> object) {
-  if (object->IsUndefined()) {
+bool ToBooleanICStub::Types::UpdateStatus(Isolate* isolate,
+                                          Handle<Object> object) {
+  if (object->IsUndefined(isolate)) {
     Add(UNDEFINED);
     return false;
   } else if (object->IsBoolean()) {
     Add(BOOLEAN);
-    return object->IsTrue();
-  } else if (object->IsNull()) {
+    return object->IsTrue(isolate);
+  } else if (object->IsNull(isolate)) {
     Add(NULL_TYPE);
     return false;
   } else if (object->IsSmi()) {
@@ -4494,10 +4609,95 @@
   assembler->Return(array);
 }
 
+namespace {
+
+void SingleArgumentConstructorCommon(CodeStubAssembler* assembler,
+                                     ElementsKind elements_kind,
+                                     compiler::Node* array_map,
+                                     compiler::Node* allocation_site,
+                                     AllocationSiteMode mode) {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+
+  Label ok(assembler);
+  Label smi_size(assembler);
+  Label small_smi_size(assembler);
+  Label call_runtime(assembler, Label::kDeferred);
+
+  Node* size = assembler->Parameter(
+      ArraySingleArgumentConstructorDescriptor::kArraySizeSmiParameterIndex);
+  assembler->Branch(assembler->WordIsSmi(size), &smi_size, &call_runtime);
+
+  assembler->Bind(&smi_size);
+  int element_size =
+      IsFastDoubleElementsKind(elements_kind) ? kDoubleSize : kPointerSize;
+  int max_fast_elements =
+      (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize -
+       JSArray::kSize - AllocationMemento::kSize) /
+      element_size;
+  assembler->Branch(
+      assembler->SmiAboveOrEqual(
+          size, assembler->SmiConstant(Smi::FromInt(max_fast_elements))),
+      &call_runtime, &small_smi_size);
+
+  assembler->Bind(&small_smi_size);
+  {
+    Node* array = assembler->AllocateJSArray(
+        elements_kind, array_map, size, size,
+        mode == DONT_TRACK_ALLOCATION_SITE ? nullptr : allocation_site,
+        CodeStubAssembler::SMI_PARAMETERS);
+    assembler->Return(array);
+  }
+
+  assembler->Bind(&call_runtime);
+  {
+    Node* context = assembler->Parameter(
+        ArraySingleArgumentConstructorDescriptor::kContextIndex);
+    Node* function = assembler->Parameter(
+        ArraySingleArgumentConstructorDescriptor::kFunctionIndex);
+    Node* array_size = assembler->Parameter(
+        ArraySingleArgumentConstructorDescriptor::kArraySizeSmiParameterIndex);
+    Node* allocation_site = assembler->Parameter(
+        ArraySingleArgumentConstructorDescriptor::kAllocationSiteIndex);
+    assembler->TailCallRuntime(Runtime::kNewArray, context, function,
+                               array_size, function, allocation_site);
+  }
+}
+}  // namespace
+
+void ArraySingleArgumentConstructorStub::GenerateAssembly(
+    CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+  Node* function = assembler->Parameter(
+      ArraySingleArgumentConstructorDescriptor::kFunctionIndex);
+  Node* native_context =
+      assembler->LoadObjectField(function, JSFunction::kContextOffset);
+  Node* array_map =
+      assembler->LoadJSArrayElementsMap(elements_kind(), native_context);
+  AllocationSiteMode mode = override_mode() == DISABLE_ALLOCATION_SITES
+                                ? DONT_TRACK_ALLOCATION_SITE
+                                : AllocationSite::GetMode(elements_kind());
+  Node* allocation_site = assembler->Parameter(
+      ArrayNoArgumentConstructorDescriptor::kAllocationSiteIndex);
+  SingleArgumentConstructorCommon(assembler, elements_kind(), array_map,
+                                  allocation_site, mode);
+}
+
+void InternalArraySingleArgumentConstructorStub::GenerateAssembly(
+    CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+  Node* function = assembler->Parameter(
+      ArraySingleArgumentConstructorDescriptor::kFunctionIndex);
+  Node* array_map = assembler->LoadObjectField(
+      function, JSFunction::kPrototypeOrInitialMapOffset);
+  SingleArgumentConstructorCommon(assembler, elements_kind(), array_map,
+                                  assembler->UndefinedConstant(),
+                                  DONT_TRACK_ALLOCATION_SITE);
+}
+
 ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
     : PlatformCodeStub(isolate) {
   minor_key_ = ArgumentCountBits::encode(ANY);
-  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
 }
 
 
@@ -4513,15 +4713,10 @@
   } else {
     UNREACHABLE();
   }
-  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
 }
 
-
-InternalArrayConstructorStub::InternalArrayConstructorStub(
-    Isolate* isolate) : PlatformCodeStub(isolate) {
-  InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
-}
-
+InternalArrayConstructorStub::InternalArrayConstructorStub(Isolate* isolate)
+    : PlatformCodeStub(isolate) {}
 
 Representation RepresentationFromType(Type* type) {
   if (type->Is(Type::UntaggedIntegral())) {
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 7bccaa9..85b0883 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -35,6 +35,7 @@
   V(JSEntry)                                \
   V(KeyedLoadICTrampoline)                  \
   V(LoadICTrampoline)                       \
+  V(LoadGlobalICTrampoline)                 \
   V(CallICTrampoline)                       \
   V(LoadIndexedString)                      \
   V(MathPow)                                \
@@ -45,9 +46,6 @@
   V(StoreElement)                           \
   V(StubFailureTrampoline)                  \
   V(SubString)                              \
-  V(ToNumber)                               \
-  V(NonNumberToNumber)                      \
-  V(StringToNumber)                         \
   V(ToString)                               \
   V(ToName)                                 \
   V(ToObject)                               \
@@ -56,8 +54,6 @@
   V(VectorStoreIC)                          \
   V(VectorKeyedStoreIC)                     \
   /* HydrogenCodeStubs */                   \
-  V(ArrayNArgumentsConstructor)             \
-  V(ArraySingleArgumentConstructor)         \
   V(BinaryOpIC)                             \
   V(BinaryOpWithAllocationSite)             \
   V(CreateAllocationSite)                   \
@@ -66,6 +62,7 @@
   V(FastArrayPush)                          \
   V(FastCloneRegExp)                        \
   V(FastCloneShallowArray)                  \
+  V(FastFunctionBind)                       \
   V(FastNewClosure)                         \
   V(FastNewContext)                         \
   V(FastNewObject)                          \
@@ -73,10 +70,7 @@
   V(FastNewSloppyArguments)                 \
   V(FastNewStrictArguments)                 \
   V(GrowArrayElements)                      \
-  V(InternalArrayNArgumentsConstructor)     \
-  V(InternalArraySingleArgumentConstructor) \
   V(KeyedLoadGeneric)                       \
-  V(LoadGlobalViaContext)                   \
   V(LoadScriptContextField)                 \
   V(LoadDictionaryElement)                  \
   V(NameDictionaryLookup)                   \
@@ -91,6 +85,7 @@
   V(TransitionElementsKind)                 \
   V(KeyedLoadIC)                            \
   V(LoadIC)                                 \
+  V(LoadGlobalIC)                           \
   /* TurboFanCodeStubs */                   \
   V(AllocateHeapNumber)                     \
   V(AllocateFloat32x4)                      \
@@ -104,6 +99,8 @@
   V(AllocateUint8x16)                       \
   V(AllocateBool8x16)                       \
   V(ArrayNoArgumentConstructor)             \
+  V(ArraySingleArgumentConstructor)         \
+  V(ArrayNArgumentsConstructor)             \
   V(StringLength)                           \
   V(Add)                                    \
   V(Subtract)                               \
@@ -118,6 +115,7 @@
   V(BitwiseXor)                             \
   V(Inc)                                    \
   V(InternalArrayNoArgumentConstructor)     \
+  V(InternalArraySingleArgumentConstructor) \
   V(Dec)                                    \
   V(FastCloneShallowObject)                 \
   V(InstanceOf)                             \
@@ -139,8 +137,9 @@
   V(ToInteger)                              \
   V(ToLength)                               \
   V(HasProperty)                            \
+  V(LoadICTrampolineTF)                     \
+  V(LoadICTF)                               \
   /* IC Handler stubs */                    \
-  V(ArrayBufferViewLoadField)               \
   V(KeyedLoadSloppyArguments)               \
   V(KeyedStoreSloppyArguments)              \
   V(LoadApiGetter)                          \
@@ -290,7 +289,6 @@
   // BinaryOpStub needs to override this.
   virtual Code::Kind GetCodeKind() const;
 
-  virtual InlineCacheState GetICState() const { return UNINITIALIZED; }
   virtual ExtraICState GetExtraICState() const { return kNoExtraICState; }
 
   Code::Flags GetCodeFlags() const;
@@ -436,6 +434,12 @@
     return NAME##Descriptor(isolate());                                 \
   }
 
+#define DEFINE_ON_STACK_CALL_INTERFACE_DESCRIPTOR(PARAMETER_COUNT)         \
+ public:                                                                   \
+  CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {    \
+    return OnStackArgsDescriptorBase::ForArgs(isolate(), PARAMETER_COUNT); \
+  }
+
 // There are some code stubs we just can't describe right now with a
 // CallInterfaceDescriptor. Isolate behavior for those cases with this macro.
 // An attempt to retrieve a descriptor will fail.
@@ -692,7 +696,6 @@
   explicit StringLengthStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
   Code::Kind GetCodeKind() const override { return Code::HANDLER; }
-  InlineCacheState GetICState() const override { return MONOMORPHIC; }
   ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
@@ -810,7 +813,7 @@
 
  private:
   DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_CODE_STUB(InstanceOf, TurboFanCodeStub);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(InstanceOf, TurboFanCodeStub);
 };
 
 class LessThanStub final : public TurboFanCodeStub {
@@ -818,7 +821,7 @@
   explicit LessThanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_CODE_STUB(LessThan, TurboFanCodeStub);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(LessThan, TurboFanCodeStub);
 };
 
 class LessThanOrEqualStub final : public TurboFanCodeStub {
@@ -826,7 +829,7 @@
   explicit LessThanOrEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_CODE_STUB(LessThanOrEqual, TurboFanCodeStub);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(LessThanOrEqual, TurboFanCodeStub);
 };
 
 class GreaterThanStub final : public TurboFanCodeStub {
@@ -834,7 +837,7 @@
   explicit GreaterThanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_CODE_STUB(GreaterThan, TurboFanCodeStub);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(GreaterThan, TurboFanCodeStub);
 };
 
 class GreaterThanOrEqualStub final : public TurboFanCodeStub {
@@ -843,7 +846,7 @@
       : TurboFanCodeStub(isolate) {}
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_CODE_STUB(GreaterThanOrEqual, TurboFanCodeStub);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(GreaterThanOrEqual, TurboFanCodeStub);
 };
 
 class EqualStub final : public TurboFanCodeStub {
@@ -851,7 +854,7 @@
   explicit EqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_CODE_STUB(Equal, TurboFanCodeStub);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Equal, TurboFanCodeStub);
 };
 
 class NotEqualStub final : public TurboFanCodeStub {
@@ -859,7 +862,7 @@
   explicit NotEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_CODE_STUB(NotEqual, TurboFanCodeStub);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(NotEqual, TurboFanCodeStub);
 };
 
 class StrictEqualStub final : public TurboFanCodeStub {
@@ -867,7 +870,7 @@
   explicit StrictEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_CODE_STUB(StrictEqual, TurboFanCodeStub);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(StrictEqual, TurboFanCodeStub);
 };
 
 class StrictNotEqualStub final : public TurboFanCodeStub {
@@ -875,7 +878,7 @@
   explicit StrictNotEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_CODE_STUB(StrictNotEqual, TurboFanCodeStub);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(StrictNotEqual, TurboFanCodeStub);
 };
 
 class StringEqualStub final : public TurboFanCodeStub {
@@ -934,7 +937,7 @@
   explicit ToBooleanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
-  DEFINE_TURBOFAN_CODE_STUB(ToBoolean, TurboFanCodeStub);
+  DEFINE_TURBOFAN_UNARY_OP_CODE_STUB(ToBoolean, TurboFanCodeStub);
 };
 
 class ToIntegerStub final : public TurboFanCodeStub {
@@ -961,7 +964,6 @@
 
   Code::Kind GetCodeKind() const override { return Code::HANDLER; }
   ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
-  InlineCacheState GetICState() const override { return MONOMORPHIC; }
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
   DEFINE_CODE_STUB(StoreInterceptor, TurboFanCodeStub);
@@ -974,7 +976,6 @@
 
   Code::Kind GetCodeKind() const override { return Code::HANDLER; }
   ExtraICState GetExtraICState() const override { return Code::KEYED_LOAD_IC; }
-  InlineCacheState GetICState() const override { return MONOMORPHIC; }
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
   DEFINE_TURBOFAN_CODE_STUB(LoadIndexedInterceptor, TurboFanCodeStub);
@@ -986,7 +987,7 @@
   explicit HasPropertyStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(HasProperty);
-  DEFINE_TURBOFAN_CODE_STUB(HasProperty, TurboFanCodeStub);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(HasProperty, TurboFanCodeStub);
 };
 
 enum StringAddFlags {
@@ -1266,10 +1267,18 @@
   explicit FastArrayPushStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
 
  private:
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(FastArrayPush);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(VarArgFunction);
   DEFINE_HYDROGEN_CODE_STUB(FastArrayPush, HydrogenCodeStub);
 };
 
+class FastFunctionBindStub : public HydrogenCodeStub {
+ public:
+  explicit FastFunctionBindStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
+
+ private:
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(VarArgFunction);
+  DEFINE_HYDROGEN_CODE_STUB(FastFunctionBind, HydrogenCodeStub);
+};
 
 enum AllocationSiteOverrideMode {
   DONT_OVERRIDE,
@@ -1298,7 +1307,7 @@
 
   class ArgumentCountBits : public BitField<ArgumentCountKey, 0, 2> {};
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayConstructor);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayNArgumentsConstructor);
   DEFINE_PLATFORM_CODE_STUB(ArrayConstructor, PlatformCodeStub);
 };
 
@@ -1310,7 +1319,7 @@
  private:
   void GenerateCase(MacroAssembler* masm, ElementsKind kind);
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(InternalArrayConstructor);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayNArgumentsConstructor);
   DEFINE_PLATFORM_CODE_STUB(InternalArrayConstructor, PlatformCodeStub);
 };
 
@@ -1325,13 +1334,17 @@
   }
 
   CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
-    if (exponent_type() == TAGGED) {
+    if (exponent_type() == ON_STACK) {
+      return OnStackArgsDescriptorBase::ForArgs(isolate(), 2);
+    } else if (exponent_type() == TAGGED) {
       return MathPowTaggedDescriptor(isolate());
     } else if (exponent_type() == INTEGER) {
       return MathPowIntegerDescriptor(isolate());
+    } else {
+      // A CallInterfaceDescriptor doesn't specify double registers (yet).
+      DCHECK_EQ(DOUBLE, exponent_type());
+      return ContextOnlyDescriptor(isolate());
     }
-    // A CallInterfaceDescriptor doesn't specify double registers (yet).
-    return ContextOnlyDescriptor(isolate());
   }
 
  private:
@@ -1354,8 +1367,6 @@
 
   Code::Kind GetCodeKind() const override { return Code::CALL_IC; }
 
-  InlineCacheState GetICState() const override { return GENERIC; }
-
   ExtraICState GetExtraICState() const final {
     return static_cast<ExtraICState>(minor_key_);
   }
@@ -1365,9 +1376,7 @@
   ConvertReceiverMode convert_mode() const { return state().convert_mode(); }
   TailCallMode tail_call_mode() const { return state().tail_call_mode(); }
 
-  CallICState state() const {
-    return CallICState(static_cast<ExtraICState>(minor_key_));
-  }
+  CallICState state() const { return CallICState(GetExtraICState()); }
 
   // Code generation helpers.
   void GenerateMiss(MacroAssembler* masm);
@@ -1416,7 +1425,6 @@
  public:
   Code::Kind GetCodeKind() const override { return Code::HANDLER; }
   ExtraICState GetExtraICState() const override { return kind(); }
-  InlineCacheState GetICState() const override { return MONOMORPHIC; }
 
   void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
 
@@ -1453,31 +1461,6 @@
 };
 
 
-class ArrayBufferViewLoadFieldStub : public HandlerStub {
- public:
-  ArrayBufferViewLoadFieldStub(Isolate* isolate, FieldIndex index)
-      : HandlerStub(isolate) {
-    int property_index_key = index.GetFieldAccessStubKey();
-    set_sub_minor_key(
-        ArrayBufferViewLoadFieldByIndexBits::encode(property_index_key));
-  }
-
-  FieldIndex index() const {
-    int property_index_key =
-        ArrayBufferViewLoadFieldByIndexBits::decode(sub_minor_key());
-    return FieldIndex::FromFieldAccessStubKey(property_index_key);
-  }
-
- protected:
-  Code::Kind kind() const override { return Code::LOAD_IC; }
-
- private:
-  class ArrayBufferViewLoadFieldByIndexBits : public BitField<int, 0, 13> {};
-
-  DEFINE_HANDLER_CODE_STUB(ArrayBufferViewLoadField, HandlerStub);
-};
-
-
 class KeyedLoadSloppyArgumentsStub : public HandlerStub {
  public:
   explicit KeyedLoadSloppyArgumentsStub(Isolate* isolate)
@@ -1542,7 +1525,6 @@
 
   Code::Kind GetCodeKind() const override { return Code::HANDLER; }
   ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
-  InlineCacheState GetICState() const override { return MONOMORPHIC; }
 
   int index() const { return IndexBits::decode(minor_key_); }
   bool receiver_is_holder() const {
@@ -1764,26 +1746,6 @@
 };
 
 
-class LoadGlobalViaContextStub final : public PlatformCodeStub {
- public:
-  static const int kMaximumDepth = 15;
-
-  LoadGlobalViaContextStub(Isolate* isolate, int depth)
-      : PlatformCodeStub(isolate) {
-    minor_key_ = DepthBits::encode(depth);
-  }
-
-  int depth() const { return DepthBits::decode(minor_key_); }
-
- private:
-  class DepthBits : public BitField<int, 0, 4> {};
-  STATIC_ASSERT(DepthBits::kMax == kMaximumDepth);
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadGlobalViaContext);
-  DEFINE_PLATFORM_CODE_STUB(LoadGlobalViaContext, PlatformCodeStub);
-};
-
-
 class StoreGlobalViaContextStub final : public PlatformCodeStub {
  public:
   static const int kMaximumDepth = 15;
@@ -1882,8 +1844,6 @@
 
   Code::Kind GetCodeKind() const override { return Code::BINARY_OP_IC; }
 
-  InlineCacheState GetICState() const final { return state().GetICState(); }
-
   ExtraICState GetExtraICState() const final {
     return static_cast<ExtraICState>(sub_minor_key());
   }
@@ -1927,8 +1887,6 @@
 
   Code::Kind GetCodeKind() const override { return Code::BINARY_OP_IC; }
 
-  InlineCacheState GetICState() const override { return state().GetICState(); }
-
   ExtraICState GetExtraICState() const override {
     return static_cast<ExtraICState>(minor_key_);
   }
@@ -1937,7 +1895,7 @@
 
  private:
   BinaryOpICState state() const {
-    return BinaryOpICState(isolate(), static_cast<ExtraICState>(minor_key_));
+    return BinaryOpICState(isolate(), GetExtraICState());
   }
 
   static void GenerateAheadOfTime(Isolate* isolate,
@@ -2006,14 +1964,19 @@
                 CompareICState::State right, CompareICState::State state)
       : PlatformCodeStub(isolate) {
     DCHECK(Token::IsCompareOp(op));
+    DCHECK(OpBits::is_valid(op - Token::EQ));
     minor_key_ = OpBits::encode(op - Token::EQ) |
                  LeftStateBits::encode(left) | RightStateBits::encode(right) |
                  StateBits::encode(state);
   }
+  CompareICStub(Isolate* isolate, ExtraICState extra_ic_state)
+      : PlatformCodeStub(isolate) {
+    minor_key_ = extra_ic_state;
+  }
 
   void set_known_map(Handle<Map> map) { known_map_ = map; }
 
-  InlineCacheState GetICState() const override;
+  InlineCacheState GetICState() const;
 
   Token::Value op() const {
     return static_cast<Token::Value>(Token::EQ + OpBits::decode(minor_key_));
@@ -2044,8 +2007,9 @@
   bool strict() const { return op() == Token::EQ_STRICT; }
   Condition GetCondition() const;
 
-  void AddToSpecialCache(Handle<Code> new_object) override;
-  bool FindCodeInSpecialCache(Code** code_out) override;
+  // Although we don't cache anything in the special cache we have to define
+  // this predicate to avoid appearance of code stubs with embedded maps in
+  // the global stub cache.
   bool UseSpecialCache() override {
     return state() == CompareICState::KNOWN_RECEIVER;
   }
@@ -2129,7 +2093,7 @@
  public:
   explicit RegExpExecStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(ContextOnly);
+  DEFINE_ON_STACK_CALL_INTERFACE_DESCRIPTOR(4);
   DEFINE_PLATFORM_CODE_STUB(RegExpExec, PlatformCodeStub);
 };
 
@@ -2159,17 +2123,6 @@
 };
 
 
-enum StringIndexFlags {
-  // Accepts smis or heap numbers.
-  STRING_INDEX_IS_NUMBER,
-
-  // Accepts smis or heap numbers that are valid array indices
-  // (ECMA-262 15.4). Invalid indices are reported as being out of
-  // range.
-  STRING_INDEX_IS_ARRAY_INDEX
-};
-
-
 enum ReceiverCheckMode {
   // We don't know anything about the receiver.
   RECEIVER_IS_UNKNOWN,
@@ -2203,7 +2156,6 @@
   StringCharCodeAtGenerator(Register object, Register index, Register result,
                             Label* receiver_not_string, Label* index_not_number,
                             Label* index_out_of_range,
-                            StringIndexFlags index_flags,
                             ReceiverCheckMode check_mode = RECEIVER_IS_UNKNOWN)
       : object_(object),
         index_(index),
@@ -2211,7 +2163,6 @@
         receiver_not_string_(receiver_not_string),
         index_not_number_(index_not_number),
         index_out_of_range_(index_out_of_range),
-        index_flags_(index_flags),
         check_mode_(check_mode) {
     DCHECK(!result_.is(object_));
     DCHECK(!result_.is(index_));
@@ -2243,7 +2194,6 @@
   Label* index_not_number_;
   Label* index_out_of_range_;
 
-  StringIndexFlags index_flags_;
   ReceiverCheckMode check_mode_;
 
   Label call_runtime_;
@@ -2307,11 +2257,10 @@
   StringCharAtGenerator(Register object, Register index, Register scratch,
                         Register result, Label* receiver_not_string,
                         Label* index_not_number, Label* index_out_of_range,
-                        StringIndexFlags index_flags,
                         ReceiverCheckMode check_mode = RECEIVER_IS_UNKNOWN)
       : char_code_at_generator_(object, index, scratch, receiver_not_string,
                                 index_not_number, index_out_of_range,
-                                index_flags, check_mode),
+                                check_mode),
         char_from_code_generator_(scratch, result) {}
 
   // Generates the fast case code. On the fallthrough path |result|
@@ -2346,64 +2295,72 @@
 
 class LoadDictionaryElementStub : public HydrogenCodeStub {
  public:
-  explicit LoadDictionaryElementStub(Isolate* isolate, const LoadICState& state)
-      : HydrogenCodeStub(isolate) {
-    minor_key_ = state.GetExtraICState();
-  }
+  explicit LoadDictionaryElementStub(Isolate* isolate)
+      : HydrogenCodeStub(isolate) {}
 
-  CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
-    return LoadWithVectorDescriptor(isolate());
-  }
-
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
   DEFINE_HYDROGEN_CODE_STUB(LoadDictionaryElement, HydrogenCodeStub);
 };
 
 
 class KeyedLoadGenericStub : public HydrogenCodeStub {
  public:
-  explicit KeyedLoadGenericStub(Isolate* isolate, const LoadICState& state)
-      : HydrogenCodeStub(isolate) {
-    minor_key_ = state.GetExtraICState();
-  }
+  explicit KeyedLoadGenericStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
 
   Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
-  InlineCacheState GetICState() const override { return GENERIC; }
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
-
   DEFINE_HYDROGEN_CODE_STUB(KeyedLoadGeneric, HydrogenCodeStub);
 };
 
 
 class LoadICTrampolineStub : public PlatformCodeStub {
  public:
-  LoadICTrampolineStub(Isolate* isolate, const LoadICState& state)
-      : PlatformCodeStub(isolate) {
-    minor_key_ = state.GetExtraICState();
-  }
+  explicit LoadICTrampolineStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
 
   Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
 
-  InlineCacheState GetICState() const final { return GENERIC; }
-
-  ExtraICState GetExtraICState() const final {
-    return static_cast<ExtraICState>(minor_key_);
-  }
-
- protected:
-  LoadICState state() const {
-    return LoadICState(static_cast<ExtraICState>(minor_key_));
-  }
-
   DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
   DEFINE_PLATFORM_CODE_STUB(LoadICTrampoline, PlatformCodeStub);
 };
 
+class LoadICTrampolineTFStub : public TurboFanCodeStub {
+ public:
+  explicit LoadICTrampolineTFStub(Isolate* isolate)
+      : TurboFanCodeStub(isolate) {}
+
+  void GenerateAssembly(CodeStubAssembler* assembler) const override;
+
+  Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
+  DEFINE_CODE_STUB(LoadICTrampolineTF, TurboFanCodeStub);
+};
+
+class LoadGlobalICTrampolineStub : public TurboFanCodeStub {
+ public:
+  explicit LoadGlobalICTrampolineStub(Isolate* isolate,
+                                      const LoadGlobalICState& state)
+      : TurboFanCodeStub(isolate) {
+    minor_key_ = state.GetExtraICState();
+  }
+
+  void GenerateAssembly(CodeStubAssembler* assembler) const override;
+
+  Code::Kind GetCodeKind() const override { return Code::LOAD_GLOBAL_IC; }
+
+  ExtraICState GetExtraICState() const final {
+    return static_cast<ExtraICState>(minor_key_);
+  }
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadGlobal);
+  DEFINE_CODE_STUB(LoadGlobalICTrampoline, TurboFanCodeStub);
+};
 
 class KeyedLoadICTrampolineStub : public LoadICTrampolineStub {
  public:
-  explicit KeyedLoadICTrampolineStub(Isolate* isolate, const LoadICState& state)
-      : LoadICTrampolineStub(isolate, state) {}
+  explicit KeyedLoadICTrampolineStub(Isolate* isolate)
+      : LoadICTrampolineStub(isolate) {}
 
   Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
 
@@ -2420,8 +2377,6 @@
 
   Code::Kind GetCodeKind() const override { return Code::STORE_IC; }
 
-  InlineCacheState GetICState() const final { return GENERIC; }
-
   ExtraICState GetExtraICState() const final {
     return static_cast<ExtraICState>(minor_key_);
   }
@@ -2458,8 +2413,6 @@
 
   Code::Kind GetCodeKind() const override { return Code::CALL_IC; }
 
-  InlineCacheState GetICState() const final { return GENERIC; }
-
   ExtraICState GetExtraICState() const final {
     return static_cast<ExtraICState>(minor_key_);
   }
@@ -2476,18 +2429,11 @@
 
 class LoadICStub : public PlatformCodeStub {
  public:
-  explicit LoadICStub(Isolate* isolate, const LoadICState& state)
-      : PlatformCodeStub(isolate) {
-    minor_key_ = state.GetExtraICState();
-  }
+  explicit LoadICStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
 
   void GenerateForTrampoline(MacroAssembler* masm);
 
   Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
-  InlineCacheState GetICState() const final { return GENERIC; }
-  ExtraICState GetExtraICState() const final {
-    return static_cast<ExtraICState>(minor_key_);
-  }
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
   DEFINE_PLATFORM_CODE_STUB(LoadIC, PlatformCodeStub);
@@ -2496,21 +2442,44 @@
   void GenerateImpl(MacroAssembler* masm, bool in_frame);
 };
 
+class LoadICTFStub : public TurboFanCodeStub {
+ public:
+  explicit LoadICTFStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  void GenerateAssembly(CodeStubAssembler* assembler) const override;
+
+  Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
+  DEFINE_CODE_STUB(LoadICTF, TurboFanCodeStub);
+};
+
+class LoadGlobalICStub : public TurboFanCodeStub {
+ public:
+  explicit LoadGlobalICStub(Isolate* isolate, const LoadGlobalICState& state)
+      : TurboFanCodeStub(isolate) {
+    minor_key_ = state.GetExtraICState();
+  }
+
+  void GenerateAssembly(CodeStubAssembler* assembler) const override;
+
+  Code::Kind GetCodeKind() const override { return Code::LOAD_GLOBAL_IC; }
+
+  ExtraICState GetExtraICState() const final {
+    return static_cast<ExtraICState>(minor_key_);
+  }
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadGlobalWithVector);
+  DEFINE_CODE_STUB(LoadGlobalIC, TurboFanCodeStub);
+};
 
 class KeyedLoadICStub : public PlatformCodeStub {
  public:
-  explicit KeyedLoadICStub(Isolate* isolate, const LoadICState& state)
-      : PlatformCodeStub(isolate) {
-    minor_key_ = state.GetExtraICState();
-  }
+  explicit KeyedLoadICStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
 
   void GenerateForTrampoline(MacroAssembler* masm);
 
   Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
-  InlineCacheState GetICState() const final { return GENERIC; }
-  ExtraICState GetExtraICState() const final {
-    return static_cast<ExtraICState>(minor_key_);
-  }
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
   DEFINE_PLATFORM_CODE_STUB(KeyedLoadIC, PlatformCodeStub);
@@ -2530,7 +2499,7 @@
   void GenerateForTrampoline(MacroAssembler* masm);
 
   Code::Kind GetCodeKind() const final { return Code::STORE_IC; }
-  InlineCacheState GetICState() const final { return GENERIC; }
+
   ExtraICState GetExtraICState() const final {
     return static_cast<ExtraICState>(minor_key_);
   }
@@ -2553,7 +2522,7 @@
   void GenerateForTrampoline(MacroAssembler* masm);
 
   Code::Kind GetCodeKind() const final { return Code::KEYED_STORE_IC; }
-  InlineCacheState GetICState() const final { return GENERIC; }
+
   ExtraICState GetExtraICState() const final {
     return static_cast<ExtraICState>(minor_key_);
   }
@@ -2742,13 +2711,11 @@
 
 class TransitionElementsKindStub : public HydrogenCodeStub {
  public:
-  TransitionElementsKindStub(Isolate* isolate,
-                             ElementsKind from_kind,
-                             ElementsKind to_kind,
-                             bool is_js_array) : HydrogenCodeStub(isolate) {
+  TransitionElementsKindStub(Isolate* isolate, ElementsKind from_kind,
+                             ElementsKind to_kind)
+      : HydrogenCodeStub(isolate) {
     set_sub_minor_key(FromKindBits::encode(from_kind) |
-                      ToKindBits::encode(to_kind) |
-                      IsJSArrayBits::encode(is_js_array));
+                      ToKindBits::encode(to_kind));
   }
 
   ElementsKind from_kind() const {
@@ -2757,12 +2724,9 @@
 
   ElementsKind to_kind() const { return ToKindBits::decode(sub_minor_key()); }
 
-  bool is_js_array() const { return IsJSArrayBits::decode(sub_minor_key()); }
-
  private:
   class FromKindBits: public BitField<ElementsKind, 8, 8> {};
   class ToKindBits: public BitField<ElementsKind, 0, 8> {};
-  class IsJSArrayBits: public BitField<bool, 16, 1> {};
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(TransitionElementsKind);
   DEFINE_HYDROGEN_CODE_STUB(TransitionElementsKind, HydrogenCodeStub);
@@ -2795,50 +2759,6 @@
 SIMD128_TYPES(SIMD128_ALLOC_STUB)
 #undef SIMD128_ALLOC_STUB
 
-class ArrayConstructorStubBase : public HydrogenCodeStub {
- public:
-  ArrayConstructorStubBase(Isolate* isolate,
-                           ElementsKind kind,
-                           AllocationSiteOverrideMode override_mode)
-      : HydrogenCodeStub(isolate) {
-    // It only makes sense to override local allocation site behavior
-    // if there is a difference between the global allocation site policy
-    // for an ElementsKind and the desired usage of the stub.
-    DCHECK(override_mode != DISABLE_ALLOCATION_SITES ||
-           AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE);
-    set_sub_minor_key(ElementsKindBits::encode(kind) |
-                      AllocationSiteOverrideModeBits::encode(override_mode));
-  }
-
-  ElementsKind elements_kind() const {
-    return ElementsKindBits::decode(sub_minor_key());
-  }
-
-  AllocationSiteOverrideMode override_mode() const {
-    return AllocationSiteOverrideModeBits::decode(sub_minor_key());
-  }
-
-  static void GenerateStubsAheadOfTime(Isolate* isolate);
-
-  // Parameters accessed via CodeStubGraphBuilder::GetParameter()
-  static const int kConstructor = 0;
-  static const int kAllocationSite = 1;
-
- protected:
-  std::ostream& BasePrintName(std::ostream& os,
-                              const char* name) const;  // NOLINT
-
- private:
-  // Ensure data fits within available bits.
-  STATIC_ASSERT(LAST_ALLOCATION_SITE_OVERRIDE_MODE == 1);
-
-  class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
-  class AllocationSiteOverrideModeBits: public
-      BitField<AllocationSiteOverrideMode, 8, 1> {};  // NOLINT
-
-  DEFINE_CODE_STUB_BASE(ArrayConstructorStubBase, HydrogenCodeStub);
-};
-
 class CommonArrayConstructorStub : public TurboFanCodeStub {
  protected:
   CommonArrayConstructorStub(Isolate* isolate, ElementsKind kind,
@@ -2869,6 +2789,8 @@
     return AllocationSiteOverrideModeBits::decode(sub_minor_key());
   }
 
+  static void GenerateStubsAheadOfTime(Isolate* isolate);
+
  private:
   // Ensure data fits within available bits.
   STATIC_ASSERT(LAST_ALLOCATION_SITE_OVERRIDE_MODE == 1);
@@ -2911,94 +2833,53 @@
                             CommonArrayConstructorStub);
 };
 
-class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase {
+class ArraySingleArgumentConstructorStub : public CommonArrayConstructorStub {
  public:
   ArraySingleArgumentConstructorStub(
-      Isolate* isolate,
-      ElementsKind kind,
+      Isolate* isolate, ElementsKind kind,
       AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
-      : ArrayConstructorStubBase(isolate, kind, override_mode) {
-  }
+      : CommonArrayConstructorStub(isolate, kind, override_mode) {}
 
  private:
   void PrintName(std::ostream& os) const override {  // NOLINT
-    BasePrintName(os, "ArraySingleArgumentConstructorStub");
+    os << "ArraySingleArgumentConstructorStub";
   }
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayConstructor);
-  DEFINE_HYDROGEN_CODE_STUB(ArraySingleArgumentConstructor,
-                            ArrayConstructorStubBase);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ArraySingleArgumentConstructor);
+  DEFINE_TURBOFAN_CODE_STUB(ArraySingleArgumentConstructor,
+                            CommonArrayConstructorStub);
 };
 
-
-class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
- public:
-  ArrayNArgumentsConstructorStub(
-      Isolate* isolate,
-      ElementsKind kind,
-      AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
-      : ArrayConstructorStubBase(isolate, kind, override_mode) {
-  }
-
- private:
-  void PrintName(std::ostream& os) const override {  // NOLINT
-    BasePrintName(os, "ArrayNArgumentsConstructorStub");
-  }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayConstructor);
-  DEFINE_HYDROGEN_CODE_STUB(ArrayNArgumentsConstructor,
-                            ArrayConstructorStubBase);
-};
-
-
-class InternalArrayConstructorStubBase : public HydrogenCodeStub {
- public:
-  InternalArrayConstructorStubBase(Isolate* isolate, ElementsKind kind)
-      : HydrogenCodeStub(isolate) {
-    set_sub_minor_key(ElementsKindBits::encode(kind));
-  }
-
-  static void GenerateStubsAheadOfTime(Isolate* isolate);
-
-  // Parameters accessed via CodeStubGraphBuilder::GetParameter()
-  static const int kConstructor = 0;
-
-  ElementsKind elements_kind() const {
-    return ElementsKindBits::decode(sub_minor_key());
-  }
-
- private:
-  class ElementsKindBits : public BitField<ElementsKind, 0, 8> {};
-
-  DEFINE_CODE_STUB_BASE(InternalArrayConstructorStubBase, HydrogenCodeStub);
-};
-
-
-class InternalArraySingleArgumentConstructorStub : public
-    InternalArrayConstructorStubBase {
+class InternalArraySingleArgumentConstructorStub
+    : public CommonArrayConstructorStub {
  public:
   InternalArraySingleArgumentConstructorStub(Isolate* isolate,
                                              ElementsKind kind)
-      : InternalArrayConstructorStubBase(isolate, kind) { }
+      : CommonArrayConstructorStub(isolate, kind, DONT_OVERRIDE) {}
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(InternalArrayConstructor);
-  DEFINE_HYDROGEN_CODE_STUB(InternalArraySingleArgumentConstructor,
-                            InternalArrayConstructorStubBase);
+ private:
+  void PrintName(std::ostream& os) const override {  // NOLINT
+    os << "InternalArraySingleArgumentConstructorStub";
+  }
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ArraySingleArgumentConstructor);
+  DEFINE_TURBOFAN_CODE_STUB(InternalArraySingleArgumentConstructor,
+                            CommonArrayConstructorStub);
 };
 
-
-class InternalArrayNArgumentsConstructorStub : public
-    InternalArrayConstructorStubBase {
+class ArrayNArgumentsConstructorStub : public PlatformCodeStub {
  public:
-  InternalArrayNArgumentsConstructorStub(Isolate* isolate, ElementsKind kind)
-      : InternalArrayConstructorStubBase(isolate, kind) { }
+  explicit ArrayNArgumentsConstructorStub(Isolate* isolate)
+      : PlatformCodeStub(isolate) {}
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(InternalArrayConstructor);
-  DEFINE_HYDROGEN_CODE_STUB(InternalArrayNArgumentsConstructor,
-                            InternalArrayConstructorStubBase);
+  CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
+    return ArrayNArgumentsConstructorDescriptor(isolate());
+  }
+
+ private:
+  DEFINE_PLATFORM_CODE_STUB(ArrayNArgumentsConstructor, PlatformCodeStub);
 };
 
-
 class StoreElementStub : public PlatformCodeStub {
  public:
   StoreElementStub(Isolate* isolate, ElementsKind elements_kind,
@@ -3051,7 +2932,7 @@
     Types() : EnumSet<Type, uint16_t>(0) {}
     explicit Types(uint16_t bits) : EnumSet<Type, uint16_t>(bits) {}
 
-    bool UpdateStatus(Handle<Object> object);
+    bool UpdateStatus(Isolate* isolate, Handle<Object> object);
     bool NeedsMap() const;
     bool CanBeUndetectable() const {
       return Contains(ToBooleanICStub::SPEC_OBJECT);
@@ -3080,7 +2961,7 @@
 
   ExtraICState GetExtraICState() const override { return types().ToIntegral(); }
 
-  InlineCacheState GetICState() const override {
+  InlineCacheState GetICState() const {
     if (types().IsEmpty()) {
       return ::v8::internal::UNINITIALIZED;
     } else {
@@ -3197,37 +3078,10 @@
  public:
   explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(ContextOnly);
+  DEFINE_ON_STACK_CALL_INTERFACE_DESCRIPTOR(3);
   DEFINE_PLATFORM_CODE_STUB(SubString, PlatformCodeStub);
 };
 
-
-class ToNumberStub final : public PlatformCodeStub {
- public:
-  explicit ToNumberStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
-  DEFINE_PLATFORM_CODE_STUB(ToNumber, PlatformCodeStub);
-};
-
-class NonNumberToNumberStub final : public PlatformCodeStub {
- public:
-  explicit NonNumberToNumberStub(Isolate* isolate)
-      : PlatformCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
-  DEFINE_PLATFORM_CODE_STUB(NonNumberToNumber, PlatformCodeStub);
-};
-
-class StringToNumberStub final : public PlatformCodeStub {
- public:
-  explicit StringToNumberStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
-  DEFINE_PLATFORM_CODE_STUB(StringToNumber, PlatformCodeStub);
-};
-
-
 class ToStringStub final : public PlatformCodeStub {
  public:
   explicit ToStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
@@ -3236,7 +3090,6 @@
   DEFINE_PLATFORM_CODE_STUB(ToString, PlatformCodeStub);
 };
 
-
 class ToNameStub final : public PlatformCodeStub {
  public:
   explicit ToNameStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
diff --git a/src/codegen.cc b/src/codegen.cc
index 93ae307..4597ae2 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -12,7 +12,6 @@
 #include "src/compiler.h"
 #include "src/debug/debug.h"
 #include "src/parsing/parser.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/runtime/runtime.h"
 
 namespace v8 {
@@ -61,7 +60,6 @@
   }
 
 UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction)
-UNARY_MATH_FUNCTION(exp, CreateExpFunction)
 
 #undef UNARY_MATH_FUNCTION
 
@@ -147,11 +145,12 @@
 void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
 #ifdef ENABLE_DISASSEMBLER
   AllowDeferredHandleDereference allow_deference_for_print_code;
-  bool print_code = info->isolate()->bootstrapper()->IsActive()
-      ? FLAG_print_builtin_code
-      : (FLAG_print_code ||
-         (info->IsStub() && FLAG_print_code_stubs) ||
-         (info->IsOptimizing() && FLAG_print_opt_code));
+  Isolate* isolate = info->isolate();
+  bool print_code =
+      isolate->bootstrapper()->IsActive()
+          ? FLAG_print_builtin_code
+          : (FLAG_print_code || (info->IsStub() && FLAG_print_code_stubs) ||
+             (info->IsOptimizing() && FLAG_print_opt_code));
   if (print_code) {
     base::SmartArrayPointer<char> debug_name = info->GetDebugName();
     CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
@@ -164,7 +163,8 @@
     if (print_source) {
       Handle<SharedFunctionInfo> shared = info->shared_info();
       Handle<Script> script = info->script();
-      if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+      if (!script->IsUndefined(isolate) &&
+          !script->source()->IsUndefined(isolate)) {
         os << "--- Raw source ---\n";
         StringCharacterStream stream(String::cast(script->source()),
                                      shared->start_position());
diff --git a/src/codegen.h b/src/codegen.h
index f941696..82962ad 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -93,16 +93,13 @@
 // generated code both in runtime and compiled code.
 typedef double (*UnaryMathFunctionWithIsolate)(double x, Isolate* isolate);
 
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate);
 UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate);
 
 
 double modulo(double x, double y);
 
 // Custom implementation of math functions.
-double fast_exp(double input, Isolate* isolate);
 double fast_sqrt(double input, Isolate* isolate);
-void lazily_initialize_fast_exp(Isolate* isolate);
 void lazily_initialize_fast_sqrt(Isolate* isolate);
 
 
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index aca8cee..53e2190 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -41,7 +41,7 @@
 Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
   DCHECK(generation < generations_);
   Handle<CompilationCacheTable> result;
-  if (tables_[generation]->IsUndefined()) {
+  if (tables_[generation]->IsUndefined(isolate())) {
     result = CompilationCacheTable::New(isolate(), kInitialCacheSize);
     tables_[generation] = *result;
   } else {
@@ -56,7 +56,7 @@
 void CompilationSubCache::Age() {
   // Don't directly age single-generation caches.
   if (generations_ == 1) {
-    if (tables_[0] != isolate()->heap()->undefined_value()) {
+    if (!tables_[0]->IsUndefined(isolate())) {
       CompilationCacheTable::cast(tables_[0])->Age();
     }
     return;
@@ -121,7 +121,7 @@
   // If the script name isn't set, the boilerplate script should have
   // an undefined name to have the same origin.
   if (name.is_null()) {
-    return script->name()->IsUndefined();
+    return script->name()->IsUndefined(isolate());
   }
   // Do the fast bailout checks first.
   if (line_offset != script->line_offset()) return false;
diff --git a/src/compilation-cache.h b/src/compilation-cache.h
index 2295f4c..973673c 100644
--- a/src/compilation-cache.h
+++ b/src/compilation-cache.h
@@ -210,7 +210,7 @@
   explicit CompilationCache(Isolate* isolate);
   ~CompilationCache();
 
-  HashMap* EagerOptimizingSet();
+  base::HashMap* EagerOptimizingSet();
 
   // The number of sub caches covering the different types to cache.
   static const int kSubCacheCount = 4;
diff --git a/src/compilation-statistics.cc b/src/compilation-statistics.cc
index c7e15b2..d4ca39d 100644
--- a/src/compilation-statistics.cc
+++ b/src/compilation-statistics.cc
@@ -54,8 +54,7 @@
   }
 }
 
-
-static void WriteLine(std::ostream& os, const char* name,
+static void WriteLine(std::ostream& os, bool machine_format, const char* name,
                       const CompilationStatistics::BasicStats& stats,
                       const CompilationStatistics::BasicStats& total_stats) {
   const size_t kBufferSize = 128;
@@ -66,17 +65,24 @@
   double size_percent =
       static_cast<double>(stats.total_allocated_bytes_ * 100) /
       static_cast<double>(total_stats.total_allocated_bytes_);
-  base::OS::SNPrintF(buffer, kBufferSize, "%28s %10.3f (%5.1f%%)  %10" PRIuS
-                                          " (%5.1f%%) %10" PRIuS " %10" PRIuS,
-                     name, ms, percent, stats.total_allocated_bytes_,
-                     size_percent, stats.max_allocated_bytes_,
-                     stats.absolute_max_allocated_bytes_);
+  if (machine_format) {
+    base::OS::SNPrintF(buffer, kBufferSize,
+                       "\"%s_time\"=%.3f\n\"%s_space\"=%" PRIuS, name, ms, name,
+                       stats.total_allocated_bytes_);
+    os << buffer;
+  } else {
+    base::OS::SNPrintF(buffer, kBufferSize, "%28s %10.3f (%5.1f%%)  %10" PRIuS
+                                            " (%5.1f%%) %10" PRIuS " %10" PRIuS,
+                       name, ms, percent, stats.total_allocated_bytes_,
+                       size_percent, stats.max_allocated_bytes_,
+                       stats.absolute_max_allocated_bytes_);
 
-  os << buffer;
-  if (stats.function_name_.size() > 0) {
-    os << "   " << stats.function_name_.c_str();
+    os << buffer;
+    if (stats.function_name_.size() > 0) {
+      os << "   " << stats.function_name_.c_str();
+    }
+    os << std::endl;
   }
-  os << std::endl;
 }
 
 
@@ -101,10 +107,10 @@
         "--------------------------------------------------------\n";
 }
 
-
-std::ostream& operator<<(std::ostream& os, const CompilationStatistics& s) {
+std::ostream& operator<<(std::ostream& os, const AsPrintableStatistics& ps) {
   // phase_kind_map_ and phase_map_ don't get mutated, so store a bunch of
   // pointers into them.
+  const CompilationStatistics& s = ps.s;
 
   typedef std::vector<CompilationStatistics::PhaseKindMap::const_iterator>
       SortedPhaseKinds;
@@ -121,22 +127,27 @@
     sorted_phases[it->second.insert_order_] = it;
   }
 
-  WriteHeader(os);
+  if (!ps.machine_output) WriteHeader(os);
   for (auto phase_kind_it : sorted_phase_kinds) {
     const auto& phase_kind_name = phase_kind_it->first;
-    for (auto phase_it : sorted_phases) {
-      const auto& phase_stats = phase_it->second;
-      if (phase_stats.phase_kind_name_ != phase_kind_name) continue;
-      const auto& phase_name = phase_it->first;
-      WriteLine(os, phase_name.c_str(), phase_stats, s.total_stats_);
+    if (!ps.machine_output) {
+      for (auto phase_it : sorted_phases) {
+        const auto& phase_stats = phase_it->second;
+        if (phase_stats.phase_kind_name_ != phase_kind_name) continue;
+        const auto& phase_name = phase_it->first;
+        WriteLine(os, ps.machine_output, phase_name.c_str(), phase_stats,
+                  s.total_stats_);
+      }
+      WritePhaseKindBreak(os);
     }
-    WritePhaseKindBreak(os);
     const auto& phase_kind_stats = phase_kind_it->second;
-    WriteLine(os, phase_kind_name.c_str(), phase_kind_stats, s.total_stats_);
+    WriteLine(os, ps.machine_output, phase_kind_name.c_str(), phase_kind_stats,
+              s.total_stats_);
     os << std::endl;
   }
-  WriteFullLine(os);
-  WriteLine(os, "totals", s.total_stats_, s.total_stats_);
+
+  if (!ps.machine_output) WriteFullLine(os);
+  WriteLine(os, ps.machine_output, "totals", s.total_stats_, s.total_stats_);
 
   return os;
 }
diff --git a/src/compilation-statistics.h b/src/compilation-statistics.h
index 6219180..ceffc2e 100644
--- a/src/compilation-statistics.h
+++ b/src/compilation-statistics.h
@@ -15,6 +15,12 @@
 namespace internal {
 
 class CompilationInfo;
+class CompilationStatistics;
+
+struct AsPrintableStatistics {
+  const CompilationStatistics& s;
+  const bool machine_output;
+};
 
 class CompilationStatistics final : public Malloced {
  public:
@@ -65,7 +71,7 @@
   };
 
   friend std::ostream& operator<<(std::ostream& os,
-                                  const CompilationStatistics& s);
+                                  const AsPrintableStatistics& s);
 
   typedef OrderedStats PhaseKindStats;
   typedef std::map<std::string, PhaseKindStats> PhaseKindMap;
@@ -78,7 +84,7 @@
   DISALLOW_COPY_AND_ASSIGN(CompilationStatistics);
 };
 
-std::ostream& operator<<(std::ostream& os, const CompilationStatistics& s);
+std::ostream& operator<<(std::ostream& os, const AsPrintableStatistics& s);
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler.cc b/src/compiler.cc
index d649950..2a0eda0 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -27,7 +27,6 @@
 #include "src/parsing/parser.h"
 #include "src/parsing/rewriter.h"
 #include "src/parsing/scanner-character-streams.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/runtime-profiler.h"
 #include "src/snapshot/code-serializer.h"
 #include "src/typing-asm.h"
@@ -135,7 +134,7 @@
       bailout_reason_(kNoReason),
       prologue_offset_(Code::kPrologueOffsetNotSet),
       track_positions_(FLAG_hydrogen_track_positions ||
-                       isolate->cpu_profiler()->is_profiling()),
+                       isolate->is_profiling()),
       parameter_count_(0),
       optimization_id_(-1),
       osr_expr_stack_height_(0),
@@ -200,6 +199,9 @@
     case Code::BYTECODE_HANDLER:
     case Code::HANDLER:
     case Code::BUILTIN:
+#define CASE_KIND(kind) case Code::kind:
+      IC_KIND_LIST(CASE_KIND)
+#undef CASE_KIND
       return StackFrame::STUB;
     case Code::WASM_FUNCTION:
       return StackFrame::WASM;
@@ -376,13 +378,13 @@
              Script::COMPILATION_TYPE_EVAL;
 }
 
-void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
+void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
                                CompilationInfo* info) {
   // Log the code generation. If source information is available include
   // script name and line number. Check explicitly whether logging is
   // enabled as finding the line number is not free.
   if (info->isolate()->logger()->is_logging_code_events() ||
-      info->isolate()->cpu_profiler()->is_profiling()) {
+      info->isolate()->is_profiling()) {
     Handle<SharedFunctionInfo> shared = info->shared_info();
     Handle<Script> script = info->parse_info()->script();
     Handle<AbstractCode> abstract_code =
@@ -399,41 +401,55 @@
     String* script_name = script->name()->IsString()
                               ? String::cast(script->name())
                               : info->isolate()->heap()->empty_string();
-    Logger::LogEventsAndTags log_tag = Logger::ToNativeByScript(tag, *script);
+    CodeEventListener::LogEventsAndTags log_tag =
+        Logger::ToNativeByScript(tag, *script);
     PROFILE(info->isolate(),
             CodeCreateEvent(log_tag, *abstract_code, *shared, script_name,
                             line_num, column_num));
   }
 }
 
-void EnsureFeedbackVector(CompilationInfo* info) {
+void EnsureFeedbackMetadata(CompilationInfo* info) {
   DCHECK(info->has_shared_info());
 
-  // If no type feedback vector exists, we create one now. At this point the
+  // If no type feedback metadata exists, we create it now. At this point the
   // AstNumbering pass has already run. Note the snapshot can contain outdated
   // vectors for a different configuration, hence we also recreate a new vector
   // when the function is not compiled (i.e. no code was serialized).
-  if (info->shared_info()->feedback_vector()->is_empty() ||
+
+  // TODO(mvstanton): reintroduce is_empty() predicate to feedback_metadata().
+  if (info->shared_info()->feedback_metadata()->length() == 0 ||
       !info->shared_info()->is_compiled()) {
     Handle<TypeFeedbackMetadata> feedback_metadata = TypeFeedbackMetadata::New(
         info->isolate(), info->literal()->feedback_vector_spec());
-    Handle<TypeFeedbackVector> feedback_vector =
-        TypeFeedbackVector::New(info->isolate(), feedback_metadata);
-    info->shared_info()->set_feedback_vector(*feedback_vector);
+    info->shared_info()->set_feedback_metadata(*feedback_metadata);
   }
 
   // It's very important that recompiles do not alter the structure of the type
   // feedback vector. Verify that the structure fits the function literal.
-  CHECK(!info->shared_info()->feedback_vector()->metadata()->SpecDiffersFrom(
+  CHECK(!info->shared_info()->feedback_metadata()->SpecDiffersFrom(
       info->literal()->feedback_vector_spec()));
 }
 
 bool UseIgnition(CompilationInfo* info) {
-  if (info->is_debug()) return false;
+  DCHECK(info->has_shared_info());
+
+  // When requesting debug code as a replacement for existing code, we provide
+  // the same kind as the existing code (to prevent implicit tier-change).
+  if (info->is_debug() && info->shared_info()->is_compiled()) {
+    return info->shared_info()->HasBytecodeArray();
+  }
+
+  // For generator or async functions we might avoid Ignition wholesale.
   if (info->shared_info()->is_resumable() && !FLAG_ignition_generators) {
     return false;
   }
 
+  // Since we can't OSR from Ignition, skip Ignition for asm.js functions.
+  if (info->shared_info()->asm_function()) {
+    return false;
+  }
+
   // Checks whether top level functions should be passed by the filter.
   if (info->shared_info()->is_toplevel()) {
     Vector<const char> filter = CStrVector(FLAG_ignition_filter);
@@ -445,26 +461,15 @@
 }
 
 int CodeAndMetadataSize(CompilationInfo* info) {
-  int size = 0;
   if (info->has_bytecode_array()) {
-    Handle<BytecodeArray> bytecode_array = info->bytecode_array();
-    size += bytecode_array->BytecodeArraySize();
-    size += bytecode_array->constant_pool()->Size();
-    size += bytecode_array->handler_table()->Size();
-    size += bytecode_array->source_position_table()->Size();
-  } else {
-    Handle<Code> code = info->code();
-    size += code->CodeSize();
-    size += code->relocation_info()->Size();
-    size += code->deoptimization_data()->Size();
-    size += code->handler_table()->Size();
+    return info->bytecode_array()->SizeIncludingMetadata();
   }
-  return size;
+  return info->code()->SizeIncludingMetadata();
 }
 
 bool GenerateUnoptimizedCode(CompilationInfo* info) {
   bool success;
-  EnsureFeedbackVector(info);
+  EnsureFeedbackMetadata(info);
   if (FLAG_validate_asm && info->scope()->asm_module()) {
     AsmTyper typer(info->isolate(), info->zone(), *(info->script()),
                    info->literal());
@@ -511,6 +516,12 @@
 
 void InstallSharedCompilationResult(CompilationInfo* info,
                                     Handle<SharedFunctionInfo> shared) {
+  // TODO(mstarzinger): Compiling for debug code might be used to reveal inner
+  // functions via {FindSharedFunctionInfoInScript}, in which case we end up
+  // regenerating existing bytecode. Fix this!
+  if (info->is_debug() && info->has_bytecode_array()) {
+    shared->ClearBytecodeArray();
+  }
   // Assert that we are not overwriting (possibly patched) debug code.
   DCHECK(!shared->HasDebugInfo());
   DCHECK(!info->code().is_null());
@@ -540,7 +551,7 @@
   InstallSharedCompilationResult(info, shared);
 
   // Record the function compilation event.
-  RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info);
+  RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
 
   return info->code();
 }
@@ -639,9 +650,14 @@
   // Parsing is not required when optimizing from existing bytecode.
   if (!info->is_optimizing_from_bytecode()) {
     if (!Compiler::ParseAndAnalyze(info->parse_info())) return false;
+    EnsureFeedbackMetadata(info);
   }
 
+  JSFunction::EnsureLiterals(info->closure());
+
   TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
+  RuntimeCallTimerScope runtimeTimer(isolate,
+                                     &RuntimeCallStats::RecompileSynchronous);
   TRACE_EVENT0("v8", "V8.RecompileSynchronous");
 
   if (job->CreateGraph() != CompilationJob::SUCCEEDED ||
@@ -659,7 +675,7 @@
   job->RecordOptimizationStats();
   DCHECK(!isolate->has_pending_exception());
   InsertCodeIntoOptimizedCodeMap(info);
-  RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info);
+  RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
   return true;
 }
 
@@ -683,13 +699,18 @@
   // Parsing is not required when optimizing from existing bytecode.
   if (!info->is_optimizing_from_bytecode()) {
     if (!Compiler::ParseAndAnalyze(info->parse_info())) return false;
+    EnsureFeedbackMetadata(info);
   }
 
+  JSFunction::EnsureLiterals(info->closure());
+
   // Reopen handles in the new CompilationHandleScope.
   info->ReopenHandlesInNewHandleScope();
   info->parse_info()->ReopenHandlesInNewHandleScope();
 
   TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
+  RuntimeCallTimerScope runtimeTimer(info->isolate(),
+                                     &RuntimeCallStats::RecompileSynchronous);
   TRACE_EVENT0("v8", "V8.RecompileSynchronous");
 
   if (job->CreateGraph() != CompilationJob::SUCCEEDED) return false;
@@ -757,6 +778,7 @@
 
   CanonicalHandleScope canonical(isolate);
   TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
+  RuntimeCallTimerScope runtimeTimer(isolate, &RuntimeCallStats::OptimizeCode);
   TRACE_EVENT0("v8", "V8.OptimizeCode");
 
   // TurboFan can optimize directly from existing bytecode.
@@ -789,18 +811,23 @@
 class InterpreterActivationsFinder : public ThreadVisitor,
                                      public OptimizedFunctionVisitor {
  public:
-  SharedFunctionInfo* shared_;
-  bool has_activations_;
-
   explicit InterpreterActivationsFinder(SharedFunctionInfo* shared)
       : shared_(shared), has_activations_(false) {}
 
   void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+    Address* activation_pc_address = nullptr;
     JavaScriptFrameIterator it(isolate, top);
-    for (; !it.done() && !has_activations_; it.Advance()) {
+    for (; !it.done(); it.Advance()) {
       JavaScriptFrame* frame = it.frame();
       if (!frame->is_interpreted()) continue;
-      if (frame->function()->shared() == shared_) has_activations_ = true;
+      if (frame->function()->shared() == shared_) {
+        has_activations_ = true;
+        activation_pc_address = frame->pc_address();
+      }
+    }
+
+    if (activation_pc_address) {
+      activation_pc_addresses_.push_back(activation_pc_address);
     }
   }
 
@@ -810,19 +837,39 @@
 
   void EnterContext(Context* context) {}
   void LeaveContext(Context* context) {}
+
+  bool MarkActivationsForBaselineOnReturn(Isolate* isolate) {
+    if (activation_pc_addresses_.empty()) return false;
+
+    for (Address* activation_pc_address : activation_pc_addresses_) {
+      DCHECK(isolate->inner_pointer_to_code_cache()
+                 ->GetCacheEntry(*activation_pc_address)
+                 ->code->is_interpreter_trampoline_builtin());
+      *activation_pc_address =
+          isolate->builtins()->InterpreterMarkBaselineOnReturn()->entry();
+    }
+    return true;
+  }
+
+  bool has_activations() { return has_activations_; }
+
+ private:
+  SharedFunctionInfo* shared_;
+  bool has_activations_;
+  std::vector<Address*> activation_pc_addresses_;
 };
 
-bool HasInterpreterActivations(Isolate* isolate, SharedFunctionInfo* shared) {
-  InterpreterActivationsFinder activations_finder(shared);
-  activations_finder.VisitThread(isolate, isolate->thread_local_top());
-  isolate->thread_manager()->IterateArchivedThreads(&activations_finder);
+bool HasInterpreterActivations(
+    Isolate* isolate, InterpreterActivationsFinder* activations_finder) {
+  activations_finder->VisitThread(isolate, isolate->thread_local_top());
+  isolate->thread_manager()->IterateArchivedThreads(activations_finder);
   if (FLAG_turbo_from_bytecode) {
     // If we are able to optimize functions directly from bytecode, then there
     // might be optimized functions that rely on bytecode being around. We need
     // to prevent switching the given function to baseline code in those cases.
-    Deoptimizer::VisitAllOptimizedFunctions(isolate, &activations_finder);
+    Deoptimizer::VisitAllOptimizedFunctions(isolate, activations_finder);
   }
-  return activations_finder.has_activations_;
+  return activations_finder->has_activations();
 }
 
 MaybeHandle<Code> GetBaselineCode(Handle<JSFunction> function) {
@@ -849,10 +896,11 @@
     return MaybeHandle<Code>();
   }
 
-  // TODO(4280): For now we do not switch generators to baseline code because
-  // there might be suspended activations stored in generator objects on the
-  // heap. We could eventually go directly to TurboFan in this case.
-  if (function->shared()->is_generator()) {
+  // TODO(4280): For now we do not switch generators or async functions to
+  // baseline code because there might be suspended activations stored in
+  // generator objects on the heap. We could eventually go directly to
+  // TurboFan in this case.
+  if (function->shared()->is_resumable()) {
     return MaybeHandle<Code>();
   }
 
@@ -860,12 +908,22 @@
   // of interpreter activations of the given function. The reasons are:
   //  1) The debugger assumes each function is either full-code or bytecode.
   //  2) The underlying bytecode is cleared below, breaking stack unwinding.
-  if (HasInterpreterActivations(isolate, function->shared())) {
+  InterpreterActivationsFinder activations_finder(function->shared());
+  if (HasInterpreterActivations(isolate, &activations_finder)) {
     if (FLAG_trace_opt) {
       OFStream os(stdout);
       os << "[unable to switch " << Brief(*function) << " due to activations]"
          << std::endl;
     }
+
+    if (activations_finder.MarkActivationsForBaselineOnReturn(isolate)) {
+      if (FLAG_trace_opt) {
+        OFStream os(stdout);
+        os << "[marking " << Brief(function->shared())
+           << " for baseline recompilation on return]" << std::endl;
+      }
+    }
+
     return MaybeHandle<Code>();
   }
 
@@ -899,7 +957,7 @@
   InstallSharedCompilationResult(&info, shared);
 
   // Record the function compilation event.
-  RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, &info);
+  RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, &info);
 
   return info.code();
 }
@@ -909,6 +967,8 @@
   DCHECK(!isolate->has_pending_exception());
   DCHECK(!function->is_compiled());
   TimerEventScope<TimerEventCompileCode> compile_timer(isolate);
+  RuntimeCallTimerScope runtimeTimer(isolate,
+                                     &RuntimeCallStats::CompileCodeLazy);
   TRACE_EVENT0("v8", "V8.CompileCode");
   AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
 
@@ -963,6 +1023,7 @@
 Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
   Isolate* isolate = info->isolate();
   TimerEventScope<TimerEventCompileCode> timer(isolate);
+  RuntimeCallTimerScope runtimeTimer(isolate, &RuntimeCallStats::CompileCode);
   TRACE_EVENT0("v8", "V8.CompileCode");
   PostponeInterruptsScope postpone(isolate);
   DCHECK(!isolate->native_context().is_null());
@@ -1057,10 +1118,10 @@
         script->name()->IsString()
             ? Handle<String>(String::cast(script->name()))
             : isolate->factory()->empty_string();
-    Logger::LogEventsAndTags log_tag =
+    CodeEventListener::LogEventsAndTags log_tag =
         parse_info->is_eval()
-            ? Logger::EVAL_TAG
-            : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script);
+            ? CodeEventListener::EVAL_TAG
+            : Logger::ToNativeByScript(CodeEventListener::SCRIPT_TAG, *script);
 
     PROFILE(isolate, CodeCreateEvent(log_tag, result->abstract_code(), *result,
                                      *script_name));
@@ -1110,6 +1171,7 @@
 
   // Install code on closure.
   function->ReplaceCode(*code);
+  JSFunction::EnsureLiterals(function);
 
   // Check postconditions on success.
   DCHECK(!isolate->has_pending_exception());
@@ -1133,6 +1195,7 @@
 
   // Install code on closure.
   function->ReplaceCode(*code);
+  JSFunction::EnsureLiterals(function);
 
   // Check postconditions on success.
   DCHECK(!isolate->has_pending_exception());
@@ -1166,6 +1229,7 @@
 
   // Install code on closure.
   function->ReplaceCode(*code);
+  JSFunction::EnsureLiterals(function);
 
   // Check postconditions on success.
   DCHECK(!isolate->has_pending_exception());
@@ -1271,10 +1335,11 @@
     CompilationInfo unoptimized(info->parse_info(), info->closure());
     unoptimized.EnableDeoptimizationSupport();
 
-    // TODO(4280): For now we do not switch generators to baseline code because
-    // there might be suspended activations stored in generator objects on the
-    // heap. We could eventually go directly to TurboFan in this case.
-    if (shared->is_generator()) return false;
+    // TODO(4280): For now we do not switch generators or async functions to
+    // baseline code because there might be suspended activations stored in
+    // generator objects on the heap. We could eventually go directly to
+    // TurboFan in this case.
+    if (shared->is_resumable()) return false;
 
     // TODO(4280): For now we disable switching to baseline code in the presence
     // of interpreter activations of the given function. The reasons are:
@@ -1282,9 +1347,11 @@
     //  2) The underlying bytecode is cleared below, breaking stack unwinding.
     // The expensive check for activations only needs to be done when the given
     // function has bytecode, otherwise we can be sure there are no activations.
-    if (shared->HasBytecodeArray() &&
-        HasInterpreterActivations(info->isolate(), *shared)) {
-      return false;
+    if (shared->HasBytecodeArray()) {
+      InterpreterActivationsFinder activations_finder(*shared);
+      if (HasInterpreterActivations(info->isolate(), &activations_finder)) {
+        return false;
+      }
     }
 
     // If the current code has reloc info for serialization, also include
@@ -1294,7 +1361,7 @@
         shared->code()->has_reloc_info_for_serialization()) {
       unoptimized.PrepareForSerializing();
     }
-    EnsureFeedbackVector(&unoptimized);
+    EnsureFeedbackMetadata(&unoptimized);
     if (!FullCodeGenerator::MakeCode(&unoptimized)) return false;
 
     // TODO(4280): For now we play it safe and remove the bytecode array when we
@@ -1312,7 +1379,8 @@
     shared->EnableDeoptimizationSupport(*unoptimized.code());
 
     // The existing unoptimized code was replaced with the new one.
-    RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, &unoptimized);
+    RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
+                              &unoptimized);
   }
   return true;
 }
@@ -1421,6 +1489,8 @@
         !isolate->debug()->is_loaded()) {
       // Then check cached code provided by embedder.
       HistogramTimerScope timer(isolate->counters()->compile_deserialize());
+      RuntimeCallTimerScope runtimeTimer(isolate,
+                                         &RuntimeCallStats::CompileDeserialize);
       TRACE_EVENT0("v8", "V8.CompileDeserialize");
       Handle<SharedFunctionInfo> result;
       if (CodeSerializer::Deserialize(isolate, *cached_data, source)
@@ -1492,6 +1562,8 @@
           compile_options == ScriptCompiler::kProduceCodeCache) {
         HistogramTimerScope histogram_timer(
             isolate->counters()->compile_serialize());
+        RuntimeCallTimerScope runtimeTimer(isolate,
+                                           &RuntimeCallStats::CompileSerialize);
         TRACE_EVENT0("v8", "V8.CompileSerialize");
         *cached_data = CodeSerializer::Serialize(isolate, result, source);
         if (FLAG_profile_deserialization) {
@@ -1608,6 +1680,7 @@
 
   // Generate code
   TimerEventScope<TimerEventCompileCode> timer(isolate);
+  RuntimeCallTimerScope runtimeTimer(isolate, &RuntimeCallStats::CompileCode);
   TRACE_EVENT0("v8", "V8.CompileCode");
   if (lazy) {
     info.SetCode(isolate->builtins()->CompileLazy());
@@ -1628,7 +1701,7 @@
   }
 
   if (maybe_existing.is_null()) {
-    RecordFunctionCompilation(Logger::FUNCTION_TAG, &info);
+    RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, &info);
   }
 
   return result;
@@ -1649,14 +1722,13 @@
   Handle<JSFunction> fun = Handle<JSFunction>::cast(Utils::OpenHandle(
       *fun_template->GetFunction(v8_isolate->GetCurrentContext())
            .ToLocalChecked()));
-  const int literals = fun->NumberOfLiterals();
   Handle<Code> code = Handle<Code>(fun->shared()->code());
   Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
   Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
-      name, literals, FunctionKind::kNormalFunction, code,
+      name, fun->shared()->num_literals(), FunctionKind::kNormalFunction, code,
       Handle<ScopeInfo>(fun->shared()->scope_info()));
   shared->set_construct_stub(*construct_stub);
-  shared->set_feedback_vector(fun->shared()->feedback_vector());
+  shared->set_feedback_metadata(fun->shared()->feedback_metadata());
 
   // Copy the function data to the shared function info.
   shared->set_function_data(fun->shared()->function_data());
@@ -1682,6 +1754,8 @@
 
   VMState<COMPILER> state(isolate);
   TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
+  RuntimeCallTimerScope runtimeTimer(isolate,
+                                     &RuntimeCallStats::RecompileSynchronous);
   TRACE_EVENT0("v8", "V8.RecompileSynchronous");
 
   Handle<SharedFunctionInfo> shared = info->shared_info();
@@ -1701,7 +1775,7 @@
       job->RetryOptimization(kBailedOutDueToDependencyChange);
     } else if (job->GenerateCode() == CompilationJob::SUCCEEDED) {
       job->RecordOptimizationStats();
-      RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info);
+      RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
       if (shared->SearchOptimizedCodeMap(info->context()->native_context(),
                                          info->osr_ast_id()).code == nullptr) {
         InsertCodeIntoOptimizedCodeMap(info);
@@ -1743,21 +1817,11 @@
   }
 
   if (cached.literals != nullptr) {
+    DCHECK(shared->is_compiled());
     function->set_literals(cached.literals);
-  } else {
-    Isolate* isolate = function->GetIsolate();
-    int number_of_literals = shared->num_literals();
-    Handle<LiteralsArray> literals =
-        LiteralsArray::New(isolate, handle(shared->feedback_vector()),
-                           number_of_literals, pretenure);
-    function->set_literals(*literals);
-
-    // Cache context-specific literals.
-    MaybeHandle<Code> code;
-    if (cached.code != nullptr) code = handle(cached.code);
-    Handle<Context> native_context(function->context()->native_context());
-    SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
-                                              literals, BailoutId::None());
+  } else if (shared->is_compiled()) {
+    // TODO(mvstanton): pass pretenure flag to EnsureLiterals.
+    JSFunction::EnsureLiterals(function);
   }
 }
 
diff --git a/src/compiler.h b/src/compiler.h
index 64bc88d..e0ebd45 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -146,6 +146,7 @@
     kSourcePositionsEnabled = 1 << 15,
     kBailoutOnUninitialized = 1 << 16,
     kOptimizeFromBytecode = 1 << 17,
+    kTypeFeedbackEnabled = 1 << 18,
   };
 
   CompilationInfo(ParseInfo* parse_info, Handle<JSFunction> closure);
@@ -256,6 +257,12 @@
     return GetFlag(kDeoptimizationEnabled);
   }
 
+  void MarkAsTypeFeedbackEnabled() { SetFlag(kTypeFeedbackEnabled); }
+
+  bool is_type_feedback_enabled() const {
+    return GetFlag(kTypeFeedbackEnabled);
+  }
+
   void MarkAsSourcePositionsEnabled() { SetFlag(kSourcePositionsEnabled); }
 
   bool is_source_positions_enabled() const {
diff --git a/src/compiler/OWNERS b/src/compiler/OWNERS
index 1257e23..02de4ed 100644
--- a/src/compiler/OWNERS
+++ b/src/compiler/OWNERS
@@ -1,6 +1,7 @@
 set noparent
 
 bmeurer@chromium.org
+epertoso@chromium.org
 jarin@chromium.org
 mstarzinger@chromium.org
 mtrofin@chromium.org
diff --git a/src/compiler/access-builder.cc b/src/compiler/access-builder.cc
index d4187fa..0eac109 100644
--- a/src/compiler/access-builder.cc
+++ b/src/compiler/access-builder.cc
@@ -98,7 +98,6 @@
   return access;
 }
 
-
 // static
 FieldAccess AccessBuilder::ForJSFunctionLiterals() {
   FieldAccess access = {
@@ -130,6 +129,63 @@
 }
 
 // static
+FieldAccess AccessBuilder::ForJSGeneratorObjectContext() {
+  FieldAccess access = {kTaggedBase,
+                        JSGeneratorObject::kContextOffset,
+                        Handle<Name>(),
+                        Type::Internal(),
+                        MachineType::AnyTagged(),
+                        kPointerWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSGeneratorObjectContinuation() {
+  TypeCache const& type_cache = TypeCache::Get();
+  FieldAccess access = {kTaggedBase,
+                        JSGeneratorObject::kContinuationOffset,
+                        Handle<Name>(),
+                        type_cache.kSmi,
+                        MachineType::AnyTagged(),
+                        kNoWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSGeneratorObjectInputOrDebugPos() {
+  FieldAccess access = {kTaggedBase,
+                        JSGeneratorObject::kInputOrDebugPosOffset,
+                        Handle<Name>(),
+                        Type::Any(),
+                        MachineType::AnyTagged(),
+                        kFullWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSGeneratorObjectOperandStack() {
+  FieldAccess access = {kTaggedBase,
+                        JSGeneratorObject::kOperandStackOffset,
+                        Handle<Name>(),
+                        Type::Internal(),
+                        MachineType::AnyTagged(),
+                        kPointerWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSGeneratorObjectResumeMode() {
+  TypeCache const& type_cache = TypeCache::Get();
+  FieldAccess access = {kTaggedBase,
+                        JSGeneratorObject::kResumeModeOffset,
+                        Handle<Name>(),
+                        type_cache.kSmi,
+                        MachineType::AnyTagged(),
+                        kNoWriteBarrier};
+  return access;
+}
+
+// static
 FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
   TypeCache const& type_cache = TypeCache::Get();
   FieldAccess access = {kTaggedBase,
@@ -312,6 +368,14 @@
 
 
 // static
+FieldAccess AccessBuilder::ForNameHashField() {
+  FieldAccess access = {kTaggedBase,           Name::kHashFieldOffset,
+                        Handle<Name>(),        Type::Internal(),
+                        MachineType::Uint32(), kNoWriteBarrier};
+  return access;
+}
+
+// static
 FieldAccess AccessBuilder::ForStringLength() {
   FieldAccess access = {kTaggedBase,
                         String::kLengthOffset,
@@ -419,19 +483,6 @@
   return access;
 }
 
-
-// static
-FieldAccess AccessBuilder::ForSharedFunctionInfoTypeFeedbackVector() {
-  FieldAccess access = {kTaggedBase,
-                        SharedFunctionInfo::kFeedbackVectorOffset,
-                        Handle<Name>(),
-                        Type::Any(),
-                        MachineType::AnyTagged(),
-                        kPointerWriteBarrier};
-  return access;
-}
-
-
 // static
 ElementAccess AccessBuilder::ForFixedArrayElement() {
   ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Tagged(),
diff --git a/src/compiler/access-builder.h b/src/compiler/access-builder.h
index b36277e..8345225 100644
--- a/src/compiler/access-builder.h
+++ b/src/compiler/access-builder.h
@@ -52,6 +52,21 @@
   // Provides access to JSFunction::next_function_link() field.
   static FieldAccess ForJSFunctionNextFunctionLink();
 
+  // Provides access to JSGeneratorObject::context() field.
+  static FieldAccess ForJSGeneratorObjectContext();
+
+  // Provides access to JSGeneratorObject::continuation() field.
+  static FieldAccess ForJSGeneratorObjectContinuation();
+
+  // Provides access to JSGeneratorObject::input_or_debug_pos() field.
+  static FieldAccess ForJSGeneratorObjectInputOrDebugPos();
+
+  // Provides access to JSGeneratorObject::operand_stack() field.
+  static FieldAccess ForJSGeneratorObjectOperandStack();
+
+  // Provides access to JSGeneratorObject::resume_mode() field.
+  static FieldAccess ForJSGeneratorObjectResumeMode();
+
   // Provides access to JSArray::length() field.
   static FieldAccess ForJSArrayLength(ElementsKind elements_kind);
 
@@ -103,6 +118,9 @@
   // Provides access to Map::prototype() field.
   static FieldAccess ForMapPrototype();
 
+  // Provides access to Name::hash_field() field.
+  static FieldAccess ForNameHashField();
+
   // Provides access to String::length() field.
   static FieldAccess ForStringLength();
 
@@ -129,9 +147,6 @@
   static FieldAccess ForPropertyCellValue();
   static FieldAccess ForPropertyCellValue(Type* type);
 
-  // Provides access to SharedFunctionInfo::feedback_vector() field.
-  static FieldAccess ForSharedFunctionInfoTypeFeedbackVector();
-
   // Provides access to FixedArray elements.
   static ElementAccess ForFixedArrayElement();
 
diff --git a/src/compiler/access-info.cc b/src/compiler/access-info.cc
index e38f629..768b985 100644
--- a/src/compiler/access-info.cc
+++ b/src/compiler/access-info.cc
@@ -9,7 +9,7 @@
 #include "src/compiler/access-info.h"
 #include "src/field-index-inl.h"
 #include "src/field-type.h"
-#include "src/objects-inl.h"  // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/objects-inl.h"
 #include "src/type-cache.h"
 
 namespace v8 {
@@ -75,10 +75,9 @@
 // static
 PropertyAccessInfo PropertyAccessInfo::DataField(
     Type* receiver_type, FieldIndex field_index, Type* field_type,
-    FieldCheck field_check, MaybeHandle<JSObject> holder,
-    MaybeHandle<Map> transition_map) {
-  return PropertyAccessInfo(holder, transition_map, field_index, field_check,
-                            field_type, receiver_type);
+    MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map) {
+  return PropertyAccessInfo(holder, transition_map, field_index, field_type,
+                            receiver_type);
 }
 
 
@@ -114,21 +113,17 @@
       holder_(holder),
       field_type_(Type::Any()) {}
 
-
 PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
                                        MaybeHandle<Map> transition_map,
-                                       FieldIndex field_index,
-                                       FieldCheck field_check, Type* field_type,
+                                       FieldIndex field_index, Type* field_type,
                                        Type* receiver_type)
     : kind_(kDataField),
       receiver_type_(receiver_type),
       transition_map_(transition_map),
       holder_(holder),
       field_index_(field_index),
-      field_check_(field_check),
       field_type_(field_type) {}
 
-
 AccessInfoFactory::AccessInfoFactory(CompilationDependencies* dependencies,
                                      Handle<Context> native_context, Zone* zone)
     : dependencies_(dependencies),
@@ -299,8 +294,7 @@
           DCHECK(field_type->Is(Type::TaggedPointer()));
         }
         *access_info = PropertyAccessInfo::DataField(
-            Type::Class(receiver_map, zone()), field_index, field_type,
-            FieldCheck::kNone, holder);
+            Type::Class(receiver_map, zone()), field_index, field_type, holder);
         return true;
       } else {
         // TODO(bmeurer): Add support for accessors.
@@ -327,7 +321,7 @@
               .ToHandle(&constructor)) {
         map = handle(constructor->initial_map(), isolate());
         DCHECK(map->prototype()->IsJSObject());
-      } else if (map->prototype()->IsNull()) {
+      } else if (map->prototype()->IsNull(isolate())) {
         // Store to property not found on the receiver or any prototype, we need
         // to transition to a new data property.
         // Implemented according to ES6 section 9.1.9 [[Set]] (P, V, Receiver)
@@ -404,26 +398,6 @@
                                                  field_index, field_type);
     return true;
   }
-  // Check for special JSArrayBufferView field accessors.
-  if (Accessors::IsJSArrayBufferViewFieldAccessor(map, name, &offset)) {
-    FieldIndex field_index = FieldIndex::ForInObjectOffset(offset);
-    Type* field_type = Type::Tagged();
-    if (Name::Equals(factory()->byte_length_string(), name) ||
-        Name::Equals(factory()->byte_offset_string(), name)) {
-      // The JSArrayBufferView::byte_length and JSArrayBufferView::byte_offset
-      // properties are always numbers in the range [0, kMaxSafeInteger].
-      field_type = type_cache_.kPositiveSafeInteger;
-    } else if (map->IsJSTypedArrayMap()) {
-      DCHECK(Name::Equals(factory()->length_string(), name));
-      // The JSTypedArray::length property is always a number in the range
-      // [0, kMaxSafeInteger].
-      field_type = type_cache_.kPositiveSafeInteger;
-    }
-    *access_info = PropertyAccessInfo::DataField(
-        Type::Class(map, zone()), field_index, field_type,
-        FieldCheck::kJSArrayBufferViewBufferNotNeutered);
-    return true;
-  }
   return false;
 }
 
@@ -471,9 +445,9 @@
       DCHECK(field_type->Is(Type::TaggedPointer()));
     }
     dependencies()->AssumeMapNotDeprecated(transition_map);
-    *access_info = PropertyAccessInfo::DataField(
-        Type::Class(map, zone()), field_index, field_type, FieldCheck::kNone,
-        holder, transition_map);
+    *access_info =
+        PropertyAccessInfo::DataField(Type::Class(map, zone()), field_index,
+                                      field_type, holder, transition_map);
     return true;
   }
   return false;
diff --git a/src/compiler/access-info.h b/src/compiler/access-info.h
index cae1191..1556e0e 100644
--- a/src/compiler/access-info.h
+++ b/src/compiler/access-info.h
@@ -53,16 +53,6 @@
 };
 
 
-// Additional checks that need to be perform for data field accesses.
-enum class FieldCheck : uint8_t {
-  // No additional checking needed.
-  kNone,
-  // Check that the [[ViewedArrayBuffer]] of {JSArrayBufferView}s
-  // was not neutered.
-  kJSArrayBufferViewBufferNotNeutered,
-};
-
-
 // This class encapsulates all information required to access a certain
 // object property, either on the object itself or on the prototype chain.
 class PropertyAccessInfo final {
@@ -76,7 +66,6 @@
                                          MaybeHandle<JSObject> holder);
   static PropertyAccessInfo DataField(
       Type* receiver_type, FieldIndex field_index, Type* field_type,
-      FieldCheck field_check = FieldCheck::kNone,
       MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
       MaybeHandle<Map> transition_map = MaybeHandle<Map>());
 
@@ -92,7 +81,6 @@
   MaybeHandle<JSObject> holder() const { return holder_; }
   MaybeHandle<Map> transition_map() const { return transition_map_; }
   Handle<Object> constant() const { return constant_; }
-  FieldCheck field_check() const { return field_check_; }
   FieldIndex field_index() const { return field_index_; }
   Type* field_type() const { return field_type_; }
   Type* receiver_type() const { return receiver_type_; }
@@ -103,8 +91,7 @@
                      Type* receiver_type);
   PropertyAccessInfo(MaybeHandle<JSObject> holder,
                      MaybeHandle<Map> transition_map, FieldIndex field_index,
-                     FieldCheck field_check, Type* field_type,
-                     Type* receiver_type);
+                     Type* field_type, Type* receiver_type);
 
   Kind kind_;
   Type* receiver_type_;
@@ -112,7 +99,6 @@
   MaybeHandle<Map> transition_map_;
   MaybeHandle<JSObject> holder_;
   FieldIndex field_index_;
-  FieldCheck field_check_;
   Type* field_type_;
 };
 
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index 2c9415e..e1cf2a6 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -27,30 +27,6 @@
   ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
       : InstructionOperandConverter(gen, instr) {}
 
-  SwVfpRegister OutputFloat32Register(size_t index = 0) {
-    return ToFloat32Register(instr_->OutputAt(index));
-  }
-
-  SwVfpRegister InputFloat32Register(size_t index) {
-    return ToFloat32Register(instr_->InputAt(index));
-  }
-
-  SwVfpRegister ToFloat32Register(InstructionOperand* op) {
-    return ToFloat64Register(op).low();
-  }
-
-  LowDwVfpRegister OutputFloat64Register(size_t index = 0) {
-    return ToFloat64Register(instr_->OutputAt(index));
-  }
-
-  LowDwVfpRegister InputFloat64Register(size_t index) {
-    return ToFloat64Register(instr_->InputAt(index));
-  }
-
-  LowDwVfpRegister ToFloat64Register(InstructionOperand* op) {
-    return LowDwVfpRegister::from_code(ToDoubleRegister(op).code());
-  }
-
   SBit OutputSBit() const {
     switch (instr_->flags_mode()) {
       case kFlags_branch:
@@ -125,13 +101,16 @@
       case kMode_Operand2_R:
       case kMode_Operand2_R_ASR_I:
       case kMode_Operand2_R_ASR_R:
-      case kMode_Operand2_R_LSL_I:
       case kMode_Operand2_R_LSL_R:
       case kMode_Operand2_R_LSR_I:
       case kMode_Operand2_R_LSR_R:
       case kMode_Operand2_R_ROR_I:
       case kMode_Operand2_R_ROR_R:
         break;
+      case kMode_Operand2_R_LSL_I:
+        *first_index += 3;
+        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
+                          LSL, InputInt32(index + 2));
       case kMode_Offset_RI:
         *first_index += 2;
         return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
@@ -162,9 +141,9 @@
 
 namespace {
 
-class OutOfLineLoadFloat32 final : public OutOfLineCode {
+class OutOfLineLoadFloat final : public OutOfLineCode {
  public:
-  OutOfLineLoadFloat32(CodeGenerator* gen, SwVfpRegister result)
+  OutOfLineLoadFloat(CodeGenerator* gen, SwVfpRegister result)
       : OutOfLineCode(gen), result_(result) {}
 
   void Generate() final {
@@ -177,10 +156,9 @@
   SwVfpRegister const result_;
 };
 
-
-class OutOfLineLoadFloat64 final : public OutOfLineCode {
+class OutOfLineLoadDouble final : public OutOfLineCode {
  public:
-  OutOfLineLoadFloat64(CodeGenerator* gen, DwVfpRegister result)
+  OutOfLineLoadDouble(CodeGenerator* gen, DwVfpRegister result)
       : OutOfLineCode(gen), result_(result) {}
 
   void Generate() final {
@@ -327,24 +305,22 @@
 
 }  // namespace
 
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(width)                           \
-  do {                                                               \
-    auto result = i.OutputFloat##width##Register();                  \
-    auto offset = i.InputRegister(0);                                \
-    if (instr->InputAt(1)->IsRegister()) {                           \
-      __ cmp(offset, i.InputRegister(1));                            \
-    } else {                                                         \
-      __ cmp(offset, i.InputImmediate(1));                           \
-    }                                                                \
-    auto ool = new (zone()) OutOfLineLoadFloat##width(this, result); \
-    __ b(hs, ool->entry());                                          \
-    __ vldr(result, i.InputOffset(2));                               \
-    __ bind(ool->exit());                                            \
-    DCHECK_EQ(LeaveCC, i.OutputSBit());                              \
+#define ASSEMBLE_CHECKED_LOAD_FP(Type)                         \
+  do {                                                         \
+    auto result = i.Output##Type##Register();                  \
+    auto offset = i.InputRegister(0);                          \
+    if (instr->InputAt(1)->IsRegister()) {                     \
+      __ cmp(offset, i.InputRegister(1));                      \
+    } else {                                                   \
+      __ cmp(offset, i.InputImmediate(1));                     \
+    }                                                          \
+    auto ool = new (zone()) OutOfLineLoad##Type(this, result); \
+    __ b(hs, ool->entry());                                    \
+    __ vldr(result, i.InputOffset(2));                         \
+    __ bind(ool->exit());                                      \
+    DCHECK_EQ(LeaveCC, i.OutputSBit());                        \
   } while (0)
 
-
 #define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                \
   do {                                                          \
     auto result = i.OutputRegister();                           \
@@ -361,21 +337,19 @@
     DCHECK_EQ(LeaveCC, i.OutputSBit());                         \
   } while (0)
 
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(width)        \
-  do {                                             \
-    auto offset = i.InputRegister(0);              \
-    if (instr->InputAt(1)->IsRegister()) {         \
-      __ cmp(offset, i.InputRegister(1));          \
-    } else {                                       \
-      __ cmp(offset, i.InputImmediate(1));         \
-    }                                              \
-    auto value = i.InputFloat##width##Register(2); \
-    __ vstr(value, i.InputOffset(3), lo);          \
-    DCHECK_EQ(LeaveCC, i.OutputSBit());            \
+#define ASSEMBLE_CHECKED_STORE_FP(Type)      \
+  do {                                       \
+    auto offset = i.InputRegister(0);        \
+    if (instr->InputAt(1)->IsRegister()) {   \
+      __ cmp(offset, i.InputRegister(1));    \
+    } else {                                 \
+      __ cmp(offset, i.InputImmediate(1));   \
+    }                                        \
+    auto value = i.Input##Type##Register(2); \
+    __ vstr(value, i.InputOffset(3), lo);    \
+    DCHECK_EQ(LeaveCC, i.OutputSBit());      \
   } while (0)
 
-
 #define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
   do {                                            \
     auto offset = i.InputRegister(0);             \
@@ -404,6 +378,35 @@
     __ dmb(ISH);                                                      \
   } while (0)
 
+#define ASSEMBLE_IEEE754_BINOP(name)                                           \
+  do {                                                                         \
+    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
+    /* and generate a CallAddress instruction instead. */                      \
+    FrameScope scope(masm(), StackFrame::MANUAL);                              \
+    __ PrepareCallCFunction(0, 2, kScratchReg);                                \
+    __ MovToFloatParameters(i.InputDoubleRegister(0),                          \
+                            i.InputDoubleRegister(1));                         \
+    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
+                     0, 2);                                                    \
+    /* Move the result in the double result register. */                       \
+    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
+    DCHECK_EQ(LeaveCC, i.OutputSBit());                                        \
+  } while (0)
+
+#define ASSEMBLE_IEEE754_UNOP(name)                                            \
+  do {                                                                         \
+    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
+    /* and generate a CallAddress instruction instead. */                      \
+    FrameScope scope(masm(), StackFrame::MANUAL);                              \
+    __ PrepareCallCFunction(0, 1, kScratchReg);                                \
+    __ MovToFloatParameter(i.InputDoubleRegister(0));                          \
+    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
+                     0, 1);                                                    \
+    /* Move the result in the double result register. */                       \
+    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
+    DCHECK_EQ(LeaveCC, i.OutputSBit());                                        \
+  } while (0)
+
 void CodeGenerator::AssembleDeconstructFrame() {
   __ LeaveFrame(StackFrame::MANUAL);
 }
@@ -584,6 +587,14 @@
       AssembleArchTableSwitch(instr);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
+    case kArchDebugBreak:
+      __ stop("kArchDebugBreak");
+      break;
+    case kArchComment: {
+      Address comment_string = i.InputExternalReference(0).address();
+      __ RecordComment(reinterpret_cast<const char*>(comment_string));
+      break;
+    }
     case kArchNop:
     case kArchThrowTerminator:
       // don't emit code for nops.
@@ -619,7 +630,7 @@
       }
       break;
     case kArchTruncateDoubleToI:
-      __ TruncateDoubleToI(i.OutputRegister(), i.InputFloat64Register(0));
+      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArchStoreWithWriteBarrier: {
@@ -663,6 +674,45 @@
       __ add(i.OutputRegister(0), base, Operand(offset.offset()));
       break;
     }
+    case kIeee754Float64Atan:
+      ASSEMBLE_IEEE754_UNOP(atan);
+      break;
+    case kIeee754Float64Atan2:
+      ASSEMBLE_IEEE754_BINOP(atan2);
+      break;
+    case kIeee754Float64Cbrt:
+      ASSEMBLE_IEEE754_UNOP(cbrt);
+      break;
+    case kIeee754Float64Cos:
+      ASSEMBLE_IEEE754_UNOP(cos);
+      break;
+    case kIeee754Float64Exp:
+      ASSEMBLE_IEEE754_UNOP(exp);
+      break;
+    case kIeee754Float64Expm1:
+      ASSEMBLE_IEEE754_UNOP(expm1);
+      break;
+    case kIeee754Float64Atanh:
+      ASSEMBLE_IEEE754_UNOP(atanh);
+      break;
+    case kIeee754Float64Log:
+      ASSEMBLE_IEEE754_UNOP(log);
+      break;
+    case kIeee754Float64Log1p:
+      ASSEMBLE_IEEE754_UNOP(log1p);
+      break;
+    case kIeee754Float64Log2:
+      ASSEMBLE_IEEE754_UNOP(log2);
+      break;
+    case kIeee754Float64Log10:
+      ASSEMBLE_IEEE754_UNOP(log10);
+      break;
+    case kIeee754Float64Sin:
+      ASSEMBLE_IEEE754_UNOP(sin);
+      break;
+    case kIeee754Float64Tan:
+      ASSEMBLE_IEEE754_UNOP(tan);
+      break;
     case kArmAdd:
       __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
              i.OutputSBit());
@@ -684,7 +734,7 @@
              i.InputRegister(2), i.OutputSBit());
       break;
     case kArmMls: {
-      CpuFeatureScope scope(masm(), MLS);
+      CpuFeatureScope scope(masm(), ARMv7);
       __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputRegister(2));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -882,95 +932,95 @@
       break;
     case kArmVcmpF32:
       if (instr->InputAt(1)->IsFPRegister()) {
-        __ VFPCompareAndSetFlags(i.InputFloat32Register(0),
-                                 i.InputFloat32Register(1));
+        __ VFPCompareAndSetFlags(i.InputFloatRegister(0),
+                                 i.InputFloatRegister(1));
       } else {
         DCHECK(instr->InputAt(1)->IsImmediate());
         // 0.0 is the only immediate supported by vcmp instructions.
         DCHECK(i.InputFloat32(1) == 0.0f);
-        __ VFPCompareAndSetFlags(i.InputFloat32Register(0), i.InputFloat32(1));
+        __ VFPCompareAndSetFlags(i.InputFloatRegister(0), i.InputFloat32(1));
       }
       DCHECK_EQ(SetCC, i.OutputSBit());
       break;
     case kArmVaddF32:
-      __ vadd(i.OutputFloat32Register(), i.InputFloat32Register(0),
-              i.InputFloat32Register(1));
+      __ vadd(i.OutputFloatRegister(), i.InputFloatRegister(0),
+              i.InputFloatRegister(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVsubF32:
-      __ vsub(i.OutputFloat32Register(), i.InputFloat32Register(0),
-              i.InputFloat32Register(1));
+      __ vsub(i.OutputFloatRegister(), i.InputFloatRegister(0),
+              i.InputFloatRegister(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmulF32:
-      __ vmul(i.OutputFloat32Register(), i.InputFloat32Register(0),
-              i.InputFloat32Register(1));
+      __ vmul(i.OutputFloatRegister(), i.InputFloatRegister(0),
+              i.InputFloatRegister(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmlaF32:
-      __ vmla(i.OutputFloat32Register(), i.InputFloat32Register(1),
-              i.InputFloat32Register(2));
+      __ vmla(i.OutputFloatRegister(), i.InputFloatRegister(1),
+              i.InputFloatRegister(2));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmlsF32:
-      __ vmls(i.OutputFloat32Register(), i.InputFloat32Register(1),
-              i.InputFloat32Register(2));
+      __ vmls(i.OutputFloatRegister(), i.InputFloatRegister(1),
+              i.InputFloatRegister(2));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVdivF32:
-      __ vdiv(i.OutputFloat32Register(), i.InputFloat32Register(0),
-              i.InputFloat32Register(1));
+      __ vdiv(i.OutputFloatRegister(), i.InputFloatRegister(0),
+              i.InputFloatRegister(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVsqrtF32:
-      __ vsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
+      __ vsqrt(i.OutputFloatRegister(), i.InputFloatRegister(0));
       break;
     case kArmVabsF32:
-      __ vabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
+      __ vabs(i.OutputFloatRegister(), i.InputFloatRegister(0));
       break;
     case kArmVnegF32:
-      __ vneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
+      __ vneg(i.OutputFloatRegister(), i.InputFloatRegister(0));
       break;
     case kArmVcmpF64:
       if (instr->InputAt(1)->IsFPRegister()) {
-        __ VFPCompareAndSetFlags(i.InputFloat64Register(0),
-                                 i.InputFloat64Register(1));
+        __ VFPCompareAndSetFlags(i.InputDoubleRegister(0),
+                                 i.InputDoubleRegister(1));
       } else {
         DCHECK(instr->InputAt(1)->IsImmediate());
         // 0.0 is the only immediate supported by vcmp instructions.
         DCHECK(i.InputDouble(1) == 0.0);
-        __ VFPCompareAndSetFlags(i.InputFloat64Register(0), i.InputDouble(1));
+        __ VFPCompareAndSetFlags(i.InputDoubleRegister(0), i.InputDouble(1));
       }
       DCHECK_EQ(SetCC, i.OutputSBit());
       break;
     case kArmVaddF64:
-      __ vadd(i.OutputFloat64Register(), i.InputFloat64Register(0),
-              i.InputFloat64Register(1));
+      __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVsubF64:
-      __ vsub(i.OutputFloat64Register(), i.InputFloat64Register(0),
-              i.InputFloat64Register(1));
+      __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmulF64:
-      __ vmul(i.OutputFloat64Register(), i.InputFloat64Register(0),
-              i.InputFloat64Register(1));
+      __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmlaF64:
-      __ vmla(i.OutputFloat64Register(), i.InputFloat64Register(1),
-              i.InputFloat64Register(2));
+      __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+              i.InputDoubleRegister(2));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmlsF64:
-      __ vmls(i.OutputFloat64Register(), i.InputFloat64Register(1),
-              i.InputFloat64Register(2));
+      __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+              i.InputDoubleRegister(2));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVdivF64:
-      __ vdiv(i.OutputFloat64Register(), i.InputFloat64Register(0),
-              i.InputFloat64Register(1));
+      __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmodF64: {
@@ -978,136 +1028,143 @@
       // and generate a CallAddress instruction instead.
       FrameScope scope(masm(), StackFrame::MANUAL);
       __ PrepareCallCFunction(0, 2, kScratchReg);
-      __ MovToFloatParameters(i.InputFloat64Register(0),
-                              i.InputFloat64Register(1));
+      __ MovToFloatParameters(i.InputDoubleRegister(0),
+                              i.InputDoubleRegister(1));
       __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                        0, 2);
       // Move the result in the double result register.
-      __ MovFromFloatResult(i.OutputFloat64Register());
+      __ MovFromFloatResult(i.OutputDoubleRegister());
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVsqrtF64:
-      __ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      __ vsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
     case kArmVabsF64:
-      __ vabs(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      __ vabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
     case kArmVnegF64:
-      __ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
     case kArmVrintmF32:
-      __ vrintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
+      __ vrintm(i.OutputFloatRegister(), i.InputFloatRegister(0));
       break;
     case kArmVrintmF64:
-      __ vrintm(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      __ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
     case kArmVrintpF32:
-      __ vrintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
+      __ vrintp(i.OutputFloatRegister(), i.InputFloatRegister(0));
       break;
     case kArmVrintpF64:
-      __ vrintp(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      __ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
     case kArmVrintzF32:
-      __ vrintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
+      __ vrintz(i.OutputFloatRegister(), i.InputFloatRegister(0));
       break;
     case kArmVrintzF64:
-      __ vrintz(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      __ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
     case kArmVrintaF64:
-      __ vrinta(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      __ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
     case kArmVrintnF32:
-      __ vrintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
+      __ vrintn(i.OutputFloatRegister(), i.InputFloatRegister(0));
       break;
     case kArmVrintnF64:
-      __ vrintn(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      __ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
     case kArmVcvtF32F64: {
-      __ vcvt_f32_f64(i.OutputFloat32Register(), i.InputFloat64Register(0));
+      __ vcvt_f32_f64(i.OutputFloatRegister(), i.InputDoubleRegister(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVcvtF64F32: {
-      __ vcvt_f64_f32(i.OutputFloat64Register(), i.InputFloat32Register(0));
+      __ vcvt_f64_f32(i.OutputDoubleRegister(), i.InputFloatRegister(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVcvtF32S32: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
       __ vmov(scratch, i.InputRegister(0));
-      __ vcvt_f32_s32(i.OutputFloat32Register(), scratch);
+      __ vcvt_f32_s32(i.OutputFloatRegister(), scratch);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVcvtF32U32: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
       __ vmov(scratch, i.InputRegister(0));
-      __ vcvt_f32_u32(i.OutputFloat32Register(), scratch);
+      __ vcvt_f32_u32(i.OutputFloatRegister(), scratch);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVcvtF64S32: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
       __ vmov(scratch, i.InputRegister(0));
-      __ vcvt_f64_s32(i.OutputFloat64Register(), scratch);
+      __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVcvtF64U32: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
       __ vmov(scratch, i.InputRegister(0));
-      __ vcvt_f64_u32(i.OutputFloat64Register(), scratch);
+      __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVcvtS32F32: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
-      __ vcvt_s32_f32(scratch, i.InputFloat32Register(0));
+      __ vcvt_s32_f32(scratch, i.InputFloatRegister(0));
       __ vmov(i.OutputRegister(), scratch);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVcvtU32F32: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
-      __ vcvt_u32_f32(scratch, i.InputFloat32Register(0));
+      __ vcvt_u32_f32(scratch, i.InputFloatRegister(0));
       __ vmov(i.OutputRegister(), scratch);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVcvtS32F64: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
-      __ vcvt_s32_f64(scratch, i.InputFloat64Register(0));
+      __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
       __ vmov(i.OutputRegister(), scratch);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVcvtU32F64: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
-      __ vcvt_u32_f64(scratch, i.InputFloat64Register(0));
+      __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
       __ vmov(i.OutputRegister(), scratch);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kArmVmovU32F32:
+      __ vmov(i.OutputRegister(), i.InputFloatRegister(0));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVmovF32U32:
+      __ vmov(i.OutputFloatRegister(), i.InputRegister(0));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
     case kArmVmovLowU32F64:
-      __ VmovLow(i.OutputRegister(), i.InputFloat64Register(0));
+      __ VmovLow(i.OutputRegister(), i.InputDoubleRegister(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmovLowF64U32:
-      __ VmovLow(i.OutputFloat64Register(), i.InputRegister(1));
+      __ VmovLow(i.OutputDoubleRegister(), i.InputRegister(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmovHighU32F64:
-      __ VmovHigh(i.OutputRegister(), i.InputFloat64Register(0));
+      __ VmovHigh(i.OutputRegister(), i.InputDoubleRegister(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmovHighF64U32:
-      __ VmovHigh(i.OutputFloat64Register(), i.InputRegister(1));
+      __ VmovHigh(i.OutputDoubleRegister(), i.InputRegister(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmovF64U32U32:
-      __ vmov(i.OutputFloat64Register(), i.InputRegister(0),
-              i.InputRegister(1));
+      __ vmov(i.OutputDoubleRegister(), i.InputRegister(0), i.InputRegister(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmLdrb:
@@ -1118,65 +1175,50 @@
       __ ldrsb(i.OutputRegister(), i.InputOffset());
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
-    case kArmStrb: {
-      size_t index = 0;
-      MemOperand operand = i.InputOffset(&index);
-      __ strb(i.InputRegister(index), operand);
+    case kArmStrb:
+      __ strb(i.InputRegister(0), i.InputOffset(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
-    }
     case kArmLdrh:
       __ ldrh(i.OutputRegister(), i.InputOffset());
       break;
     case kArmLdrsh:
       __ ldrsh(i.OutputRegister(), i.InputOffset());
       break;
-    case kArmStrh: {
-      size_t index = 0;
-      MemOperand operand = i.InputOffset(&index);
-      __ strh(i.InputRegister(index), operand);
+    case kArmStrh:
+      __ strh(i.InputRegister(0), i.InputOffset(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
-    }
     case kArmLdr:
       __ ldr(i.OutputRegister(), i.InputOffset());
       break;
-    case kArmStr: {
-      size_t index = 0;
-      MemOperand operand = i.InputOffset(&index);
-      __ str(i.InputRegister(index), operand);
+    case kArmStr:
+      __ str(i.InputRegister(0), i.InputOffset(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
-    }
     case kArmVldrF32: {
-      __ vldr(i.OutputFloat32Register(), i.InputOffset());
+      __ vldr(i.OutputFloatRegister(), i.InputOffset());
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
-    case kArmVstrF32: {
-      size_t index = 0;
-      MemOperand operand = i.InputOffset(&index);
-      __ vstr(i.InputFloat32Register(index), operand);
+    case kArmVstrF32:
+      __ vstr(i.InputFloatRegister(0), i.InputOffset(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
-    }
     case kArmVldrF64:
-      __ vldr(i.OutputFloat64Register(), i.InputOffset());
+      __ vldr(i.OutputDoubleRegister(), i.InputOffset());
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
-    case kArmVstrF64: {
-      size_t index = 0;
-      MemOperand operand = i.InputOffset(&index);
-      __ vstr(i.InputFloat64Register(index), operand);
+    case kArmVstrF64:
+      __ vstr(i.InputDoubleRegister(0), i.InputOffset(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
-    }
     case kArmFloat32Max: {
       CpuFeatureScope scope(masm(), ARMv8);
       // (b < a) ? a : b
-      SwVfpRegister a = i.InputFloat32Register(0);
-      SwVfpRegister b = i.InputFloat32Register(1);
-      SwVfpRegister result = i.OutputFloat32Register(0);
+      SwVfpRegister a = i.InputFloatRegister(0);
+      SwVfpRegister b = i.InputFloatRegister(1);
+      SwVfpRegister result = i.OutputFloatRegister();
       __ VFPCompareAndSetFlags(a, b);
       __ vsel(gt, result, a, b);
       break;
@@ -1184,9 +1226,9 @@
     case kArmFloat32Min: {
       CpuFeatureScope scope(masm(), ARMv8);
       // (a < b) ? a : b
-      SwVfpRegister a = i.InputFloat32Register(0);
-      SwVfpRegister b = i.InputFloat32Register(1);
-      SwVfpRegister result = i.OutputFloat32Register(0);
+      SwVfpRegister a = i.InputFloatRegister(0);
+      SwVfpRegister b = i.InputFloatRegister(1);
+      SwVfpRegister result = i.OutputFloatRegister();
       __ VFPCompareAndSetFlags(b, a);
       __ vsel(gt, result, a, b);
       break;
@@ -1194,9 +1236,9 @@
     case kArmFloat64Max: {
       CpuFeatureScope scope(masm(), ARMv8);
       // (b < a) ? a : b
-      DwVfpRegister a = i.InputFloat64Register(0);
-      DwVfpRegister b = i.InputFloat64Register(1);
-      DwVfpRegister result = i.OutputFloat64Register(0);
+      DwVfpRegister a = i.InputDoubleRegister(0);
+      DwVfpRegister b = i.InputDoubleRegister(1);
+      DwVfpRegister result = i.OutputDoubleRegister();
       __ VFPCompareAndSetFlags(a, b);
       __ vsel(gt, result, a, b);
       break;
@@ -1204,17 +1246,30 @@
     case kArmFloat64Min: {
       CpuFeatureScope scope(masm(), ARMv8);
       // (a < b) ? a : b
-      DwVfpRegister a = i.InputFloat64Register(0);
-      DwVfpRegister b = i.InputFloat64Register(1);
-      DwVfpRegister result = i.OutputFloat64Register(0);
+      DwVfpRegister a = i.InputDoubleRegister(0);
+      DwVfpRegister b = i.InputDoubleRegister(1);
+      DwVfpRegister result = i.OutputDoubleRegister();
       __ VFPCompareAndSetFlags(b, a);
       __ vsel(gt, result, a, b);
       break;
     }
+    case kArmFloat64SilenceNaN: {
+      DwVfpRegister value = i.InputDoubleRegister(0);
+      DwVfpRegister result = i.OutputDoubleRegister();
+      __ VFPCanonicalizeNaN(result, value);
+      break;
+    }
     case kArmPush:
       if (instr->InputAt(0)->IsFPRegister()) {
-        __ vpush(i.InputDoubleRegister(0));
-        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+        LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+        if (op->representation() == MachineRepresentation::kFloat64) {
+          __ vpush(i.InputDoubleRegister(0));
+          frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+        } else {
+          DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+          __ vpush(i.InputFloatRegister(0));
+          frame_access_state()->IncreaseSPDelta(1);
+        }
       } else {
         __ push(i.InputRegister(0));
         frame_access_state()->IncreaseSPDelta(1);
@@ -1243,10 +1298,10 @@
       ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
       break;
     case kCheckedLoadFloat32:
-      ASSEMBLE_CHECKED_LOAD_FLOAT(32);
+      ASSEMBLE_CHECKED_LOAD_FP(Float);
       break;
     case kCheckedLoadFloat64:
-      ASSEMBLE_CHECKED_LOAD_FLOAT(64);
+      ASSEMBLE_CHECKED_LOAD_FP(Double);
       break;
     case kCheckedStoreWord8:
       ASSEMBLE_CHECKED_STORE_INTEGER(strb);
@@ -1258,10 +1313,10 @@
       ASSEMBLE_CHECKED_STORE_INTEGER(str);
       break;
     case kCheckedStoreFloat32:
-      ASSEMBLE_CHECKED_STORE_FLOAT(32);
+      ASSEMBLE_CHECKED_STORE_FP(Float);
       break;
     case kCheckedStoreFloat64:
-      ASSEMBLE_CHECKED_STORE_FLOAT(64);
+      ASSEMBLE_CHECKED_STORE_FP(Double);
       break;
     case kCheckedLoadWord64:
     case kCheckedStoreWord64:
@@ -1522,6 +1577,7 @@
       switch (src.type()) {
         case Constant::kInt32:
           if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
               src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
             __ mov(dst, Operand(src.ToInt32(), src.rmode()));
           } else {
@@ -1566,13 +1622,13 @@
         __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
         __ str(ip, dst);
       } else {
-        SwVfpRegister dst = g.ToFloat32Register(destination);
+        SwVfpRegister dst = g.ToFloatRegister(destination);
         __ vmov(dst, src.ToFloat32());
       }
     } else {
       DCHECK_EQ(Constant::kFloat64, src.type());
       DwVfpRegister dst = destination->IsFPRegister()
-                              ? g.ToFloat64Register(destination)
+                              ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
       __ vmov(dst, src.ToFloat64(), kScratchReg);
       if (destination->IsFPStackSlot()) {
@@ -1580,23 +1636,50 @@
       }
     }
   } else if (source->IsFPRegister()) {
-    DwVfpRegister src = g.ToDoubleRegister(source);
-    if (destination->IsFPRegister()) {
-      DwVfpRegister dst = g.ToDoubleRegister(destination);
-      __ Move(dst, src);
+    MachineRepresentation rep = LocationOperand::cast(source)->representation();
+    if (rep == MachineRepresentation::kFloat64) {
+      DwVfpRegister src = g.ToDoubleRegister(source);
+      if (destination->IsFPRegister()) {
+        DwVfpRegister dst = g.ToDoubleRegister(destination);
+        __ Move(dst, src);
+      } else {
+        DCHECK(destination->IsFPStackSlot());
+        __ vstr(src, g.ToMemOperand(destination));
+      }
     } else {
-      DCHECK(destination->IsFPStackSlot());
-      __ vstr(src, g.ToMemOperand(destination));
+      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+      SwVfpRegister src = g.ToFloatRegister(source);
+      if (destination->IsFPRegister()) {
+        SwVfpRegister dst = g.ToFloatRegister(destination);
+        __ Move(dst, src);
+      } else {
+        DCHECK(destination->IsFPStackSlot());
+        __ vstr(src, g.ToMemOperand(destination));
+      }
     }
   } else if (source->IsFPStackSlot()) {
-    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
     MemOperand src = g.ToMemOperand(source);
+    MachineRepresentation rep =
+        LocationOperand::cast(destination)->representation();
     if (destination->IsFPRegister()) {
-      __ vldr(g.ToDoubleRegister(destination), src);
+      if (rep == MachineRepresentation::kFloat64) {
+        __ vldr(g.ToDoubleRegister(destination), src);
+      } else {
+        DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+        __ vldr(g.ToFloatRegister(destination), src);
+      }
     } else {
-      DwVfpRegister temp = kScratchDoubleReg;
-      __ vldr(temp, src);
-      __ vstr(temp, g.ToMemOperand(destination));
+      DCHECK(destination->IsFPStackSlot());
+      if (rep == MachineRepresentation::kFloat64) {
+        DwVfpRegister temp = kScratchDoubleReg;
+        __ vldr(temp, src);
+        __ vstr(temp, g.ToMemOperand(destination));
+      } else {
+        DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+        SwVfpRegister temp = kScratchDoubleReg.low();
+        __ vldr(temp, src);
+        __ vstr(temp, g.ToMemOperand(destination));
+      }
     }
   } else {
     UNREACHABLE();
@@ -1636,34 +1719,61 @@
     __ str(temp_0, dst);
     __ vstr(temp_1, src);
   } else if (source->IsFPRegister()) {
-    DwVfpRegister temp = kScratchDoubleReg;
-    DwVfpRegister src = g.ToDoubleRegister(source);
-    if (destination->IsFPRegister()) {
-      DwVfpRegister dst = g.ToDoubleRegister(destination);
-      __ Move(temp, src);
-      __ Move(src, dst);
-      __ Move(dst, temp);
+    MachineRepresentation rep = LocationOperand::cast(source)->representation();
+    LowDwVfpRegister temp = kScratchDoubleReg;
+    if (rep == MachineRepresentation::kFloat64) {
+      DwVfpRegister src = g.ToDoubleRegister(source);
+      if (destination->IsFPRegister()) {
+        DwVfpRegister dst = g.ToDoubleRegister(destination);
+        __ Move(temp, src);
+        __ Move(src, dst);
+        __ Move(dst, temp);
+      } else {
+        DCHECK(destination->IsFPStackSlot());
+        MemOperand dst = g.ToMemOperand(destination);
+        __ Move(temp, src);
+        __ vldr(src, dst);
+        __ vstr(temp, dst);
+      }
     } else {
-      DCHECK(destination->IsFPStackSlot());
-      MemOperand dst = g.ToMemOperand(destination);
-      __ Move(temp, src);
-      __ vldr(src, dst);
-      __ vstr(temp, dst);
+      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+      SwVfpRegister src = g.ToFloatRegister(source);
+      if (destination->IsFPRegister()) {
+        SwVfpRegister dst = g.ToFloatRegister(destination);
+        __ Move(temp.low(), src);
+        __ Move(src, dst);
+        __ Move(dst, temp.low());
+      } else {
+        DCHECK(destination->IsFPStackSlot());
+        MemOperand dst = g.ToMemOperand(destination);
+        __ Move(temp.low(), src);
+        __ vldr(src, dst);
+        __ vstr(temp.low(), dst);
+      }
     }
   } else if (source->IsFPStackSlot()) {
     DCHECK(destination->IsFPStackSlot());
     Register temp_0 = kScratchReg;
-    DwVfpRegister temp_1 = kScratchDoubleReg;
+    LowDwVfpRegister temp_1 = kScratchDoubleReg;
     MemOperand src0 = g.ToMemOperand(source);
-    MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
     MemOperand dst0 = g.ToMemOperand(destination);
-    MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
-    __ vldr(temp_1, dst0);  // Save destination in temp_1.
-    __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
-    __ str(temp_0, dst0);
-    __ ldr(temp_0, src1);
-    __ str(temp_0, dst1);
-    __ vstr(temp_1, src0);
+    MachineRepresentation rep = LocationOperand::cast(source)->representation();
+    if (rep == MachineRepresentation::kFloat64) {
+      MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
+      MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
+      __ vldr(temp_1, dst0);  // Save destination in temp_1.
+      __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
+      __ str(temp_0, dst0);
+      __ ldr(temp_0, src1);
+      __ str(temp_0, dst1);
+      __ vstr(temp_1, src0);
+    } else {
+      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+      __ vldr(temp_1.low(), dst0);  // Save destination in temp_1.
+      __ ldr(temp_0, src0);  // Then use temp_0 to copy source to destination.
+      __ str(temp_0, dst0);
+      __ vstr(temp_1.low(), src0);
+    }
   } else {
     // No other combinations are possible.
     UNREACHABLE();
diff --git a/src/compiler/arm/instruction-codes-arm.h b/src/compiler/arm/instruction-codes-arm.h
index fc371e0..bc3336f 100644
--- a/src/compiler/arm/instruction-codes-arm.h
+++ b/src/compiler/arm/instruction-codes-arm.h
@@ -92,6 +92,8 @@
   V(ArmVcvtU32F32)                 \
   V(ArmVcvtS32F64)                 \
   V(ArmVcvtU32F64)                 \
+  V(ArmVmovU32F32)                 \
+  V(ArmVmovF32U32)                 \
   V(ArmVmovLowU32F64)              \
   V(ArmVmovLowF64U32)              \
   V(ArmVmovHighU32F64)             \
@@ -105,6 +107,7 @@
   V(ArmFloat32Min)                 \
   V(ArmFloat64Max)                 \
   V(ArmFloat64Min)                 \
+  V(ArmFloat64SilenceNaN)          \
   V(ArmLdrb)                       \
   V(ArmLdrsb)                      \
   V(ArmStrb)                       \
diff --git a/src/compiler/arm/instruction-scheduler-arm.cc b/src/compiler/arm/instruction-scheduler-arm.cc
index ec28b72..065fe52 100644
--- a/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/src/compiler/arm/instruction-scheduler-arm.cc
@@ -94,6 +94,8 @@
     case kArmVcvtU32F32:
     case kArmVcvtS32F64:
     case kArmVcvtU32F64:
+    case kArmVmovU32F32:
+    case kArmVmovF32U32:
     case kArmVmovLowU32F64:
     case kArmVmovLowF64U32:
     case kArmVmovHighU32F64:
@@ -103,6 +105,7 @@
     case kArmFloat64Min:
     case kArmFloat32Max:
     case kArmFloat32Min:
+    case kArmFloat64SilenceNaN:
       return kNoOpcodeFlags;
 
     case kArmVldrF32:
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index b2b1a70..e21e63f 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -115,6 +115,24 @@
   return false;
 }
 
+template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
+          AddressingMode kImmMode>
+bool TryMatchShiftImmediate(InstructionSelector* selector,
+                            InstructionCode* opcode_return, Node* node,
+                            InstructionOperand* value_return,
+                            InstructionOperand* shift_return) {
+  ArmOperandGenerator g(selector);
+  if (node->opcode() == kOpcode) {
+    Int32BinopMatcher m(node);
+    if (m.right().IsInRange(kImmMin, kImmMax)) {
+      *opcode_return |= AddressingModeField::encode(kImmMode);
+      *value_return = g.UseRegister(m.left().node());
+      *shift_return = g.UseImmediate(m.right().node());
+      return true;
+    }
+  }
+  return false;
+}
 
 bool TryMatchROR(InstructionSelector* selector, InstructionCode* opcode_return,
                  Node* node, InstructionOperand* value_return,
@@ -142,6 +160,14 @@
                                                value_return, shift_return);
 }
 
+bool TryMatchLSLImmediate(InstructionSelector* selector,
+                          InstructionCode* opcode_return, Node* node,
+                          InstructionOperand* value_return,
+                          InstructionOperand* shift_return) {
+  return TryMatchShiftImmediate<IrOpcode::kWord32Shl, 0, 31,
+                                kMode_Operand2_R_LSL_I>(
+      selector, opcode_return, node, value_return, shift_return);
+}
 
 bool TryMatchLSR(InstructionSelector* selector, InstructionCode* opcode_return,
                  Node* node, InstructionOperand* value_return,
@@ -226,7 +252,14 @@
     inputs[input_count++] = g.Label(cont->false_block());
   }
 
-  outputs[output_count++] = g.DefineAsRegister(node);
+  if (cont->IsDeoptimize()) {
+    // If we can deoptimize as a result of the binop, we need to make sure that
+    // the deopt inputs are not overwritten by the binop result. One way
+    // to achieve that is to declare the output register as same-as-first.
+    outputs[output_count++] = g.DefineSameAsFirst(node);
+  } else {
+    outputs[output_count++] = g.DefineAsRegister(node);
+  }
   if (cont->IsSet()) {
     outputs[output_count++] = g.DefineAsRegister(cont->result());
   }
@@ -294,13 +327,14 @@
   InstructionOperand right_operand = g.UseRegister(m.right().node());
   EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
           left_operand, right_operand);
-  if (selector->IsSupported(MLS)) {
+  if (selector->IsSupported(ARMv7)) {
     selector->Emit(kArmMls, result_operand, div_operand, right_operand,
                    left_operand);
   } else {
     InstructionOperand mul_operand = g.TempRegister();
     selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
-    selector->Emit(kArmSub, result_operand, left_operand, mul_operand);
+    selector->Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_R),
+                   result_operand, left_operand, mul_operand);
   }
 }
 
@@ -312,8 +346,11 @@
   ArmOperandGenerator g(this);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
+  InstructionOperand inputs[3];
+  size_t input_count = 0;
+  InstructionOperand outputs[1];
 
-  ArchOpcode opcode = kArchNop;
+  InstructionCode opcode = kArchNop;
   switch (load_rep.representation()) {
     case MachineRepresentation::kFloat32:
       opcode = kArmVldrF32;
@@ -339,13 +376,24 @@
       return;
   }
 
+  outputs[0] = g.DefineAsRegister(node);
+  inputs[0] = g.UseRegister(base);
+
   if (g.CanBeImmediate(index, opcode)) {
-    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI),
-         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+    input_count = 2;
+    inputs[1] = g.UseImmediate(index);
+    opcode |= AddressingModeField::encode(kMode_Offset_RI);
+  } else if ((opcode == kArmLdr) &&
+             TryMatchLSLImmediate(this, &opcode, index, &inputs[1],
+                                  &inputs[2])) {
+    input_count = 3;
   } else {
-    Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
-         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+    input_count = 2;
+    inputs[1] = g.UseRegister(index);
+    opcode |= AddressingModeField::encode(kMode_Offset_RR);
   }
+
+  Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
 }
 
 
@@ -397,7 +445,10 @@
     code |= MiscField::encode(static_cast<int>(record_write_mode));
     Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
   } else {
-    ArchOpcode opcode = kArchNop;
+    InstructionOperand inputs[4];
+    size_t input_count = 0;
+
+    InstructionCode opcode = kArchNop;
     switch (rep) {
       case MachineRepresentation::kFloat32:
         opcode = kArmVstrF32;
@@ -423,13 +474,23 @@
         return;
     }
 
+    inputs[0] = g.UseRegister(value);
+    inputs[1] = g.UseRegister(base);
+
     if (g.CanBeImmediate(index, opcode)) {
-      Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), g.NoOutput(),
-           g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+      input_count = 3;
+      inputs[2] = g.UseImmediate(index);
+      opcode |= AddressingModeField::encode(kMode_Offset_RI);
+    } else if ((opcode == kArmStr) &&
+               TryMatchLSLImmediate(this, &opcode, index, &inputs[2],
+                                    &inputs[3])) {
+      input_count = 4;
     } else {
-      Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
-           g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+      input_count = 3;
+      inputs[2] = g.UseRegister(index);
+      opcode |= AddressingModeField::encode(kMode_Offset_RR);
     }
+    Emit(opcode, 0, nullptr, input_count, inputs);
   }
 }
 
@@ -1022,7 +1083,7 @@
 void InstructionSelector::VisitInt32Sub(Node* node) {
   ArmOperandGenerator g(this);
   Int32BinopMatcher m(node);
-  if (IsSupported(MLS) && m.right().IsInt32Mul() &&
+  if (IsSupported(ARMv7) && m.right().IsInt32Mul() &&
       CanCover(node, m.right().node())) {
     Int32BinopMatcher mright(m.right().node());
     Emit(kArmMls, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
@@ -1150,20 +1211,14 @@
   VisitRR(this, kArmVcvtS32F64, node);
 }
 
-
 void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
-  VisitRR(this, kArmVmovLowU32F64, node);
+  VisitRR(this, kArmVmovU32F32, node);
 }
 
-
 void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
-  ArmOperandGenerator g(this);
-  Emit(kArmVmovLowF64U32, g.DefineAsRegister(node),
-       ImmediateOperand(ImmediateOperand::INLINE, 0),
-       g.UseRegister(node->InputAt(0)));
+  VisitRR(this, kArmVmovF32U32, node);
 }
 
-
 void InstructionSelector::VisitFloat32Add(Node* node) {
   ArmOperandGenerator g(this);
   Float32BinopMatcher m(node);
@@ -1313,6 +1368,10 @@
   VisitRRR(this, kArmFloat64Max, node);
 }
 
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+  VisitRR(this, kArmFloat64SilenceNaN, node);
+}
+
 void InstructionSelector::VisitFloat32Min(Node* node) {
   DCHECK(IsSupported(ARMv8));
   VisitRRR(this, kArmFloat32Min, node);
@@ -1332,7 +1391,6 @@
   VisitRR(this, kArmVabsF64, node);
 }
 
-
 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
   VisitRR(this, kArmVsqrtF32, node);
 }
@@ -1387,6 +1445,28 @@
   VisitRR(this, kArmVrintnF64, node);
 }
 
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+  VisitRR(this, kArmVnegF32, node);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+  VisitRR(this, kArmVnegF64, node);
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+                                                   InstructionCode opcode) {
+  ArmOperandGenerator g(this);
+  Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
+       g.UseFixed(node->InputAt(1), d1))
+      ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+                                                  InstructionCode opcode) {
+  ArmOperandGenerator g(this);
+  Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0))
+      ->MarkAsCall();
+}
 
 void InstructionSelector::EmitPrepareArguments(
     ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1891,9 +1971,13 @@
 // static
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
-  MachineOperatorBuilder::Flags flags =
-      MachineOperatorBuilder::kInt32DivIsSafe |
-      MachineOperatorBuilder::kUint32DivIsSafe;
+  MachineOperatorBuilder::Flags flags;
+  if (CpuFeatures::IsSupported(SUDIV)) {
+    // The sdiv and udiv instructions correctly return 0 if the divisor is 0,
+    // but the fall-back implementation does not.
+    flags |= MachineOperatorBuilder::kInt32DivIsSafe |
+             MachineOperatorBuilder::kUint32DivIsSafe;
+  }
   if (CpuFeatures::IsSupported(ARMv7)) {
     flags |= MachineOperatorBuilder::kWord32ReverseBits;
   }
@@ -1910,11 +1994,20 @@
              MachineOperatorBuilder::kFloat32Min |
              MachineOperatorBuilder::kFloat32Max |
              MachineOperatorBuilder::kFloat64Min |
-             MachineOperatorBuilder::kFloat64Max;
+             MachineOperatorBuilder::kFloat64Max |
+             MachineOperatorBuilder::kFloat32Neg |
+             MachineOperatorBuilder::kFloat64Neg;
   }
   return flags;
 }
 
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+  return MachineOperatorBuilder::AlignmentRequirements::
+      FullUnalignedAccessSupport();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index 0f9fb7c..479af7a 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -210,7 +210,8 @@
           return Operand(constant.ToInt32());
         }
       case Constant::kInt64:
-        if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+        if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+            constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
           return Operand(constant.ToInt64(), constant.rmode());
         } else {
           DCHECK(constant.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
@@ -400,6 +401,17 @@
 
 }  // namespace
 
+#define ASSEMBLE_BOUNDS_CHECK(offset, length, out_of_bounds)   \
+  do {                                                         \
+    if (length.IsImmediate() &&                                \
+        base::bits::IsPowerOfTwo64(length.ImmediateValue())) { \
+      __ Tst(offset, ~(length.ImmediateValue() - 1));          \
+      __ B(ne, out_of_bounds);                                 \
+    } else {                                                   \
+      __ Cmp(offset, length);                                  \
+      __ B(hs, out_of_bounds);                                 \
+    }                                                          \
+  } while (0)
 
 #define ASSEMBLE_CHECKED_LOAD_FLOAT(width)                         \
   do {                                                             \
@@ -407,37 +419,32 @@
     auto buffer = i.InputRegister(0);                              \
     auto offset = i.InputRegister32(1);                            \
     auto length = i.InputOperand32(2);                             \
-    __ Cmp(offset, length);                                        \
     auto ool = new (zone()) OutOfLineLoadNaN##width(this, result); \
-    __ B(hs, ool->entry());                                        \
+    ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry());           \
     __ Ldr(result, MemOperand(buffer, offset, UXTW));              \
     __ Bind(ool->exit());                                          \
   } while (0)
 
-
 #define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)             \
   do {                                                       \
     auto result = i.OutputRegister32();                      \
     auto buffer = i.InputRegister(0);                        \
     auto offset = i.InputRegister32(1);                      \
     auto length = i.InputOperand32(2);                       \
-    __ Cmp(offset, length);                                  \
     auto ool = new (zone()) OutOfLineLoadZero(this, result); \
-    __ B(hs, ool->entry());                                  \
+    ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry());     \
     __ asm_instr(result, MemOperand(buffer, offset, UXTW));  \
     __ Bind(ool->exit());                                    \
   } while (0)
 
-
 #define ASSEMBLE_CHECKED_LOAD_INTEGER_64(asm_instr)          \
   do {                                                       \
     auto result = i.OutputRegister();                        \
     auto buffer = i.InputRegister(0);                        \
     auto offset = i.InputRegister32(1);                      \
     auto length = i.InputOperand32(2);                       \
-    __ Cmp(offset, length);                                  \
     auto ool = new (zone()) OutOfLineLoadZero(this, result); \
-    __ B(hs, ool->entry());                                  \
+    ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry());     \
     __ asm_instr(result, MemOperand(buffer, offset, UXTW));  \
     __ Bind(ool->exit());                                    \
   } while (0)
@@ -448,9 +455,8 @@
     auto offset = i.InputRegister32(1);                  \
     auto length = i.InputOperand32(2);                   \
     auto value = i.InputFloat##width##OrZeroRegister(3); \
-    __ Cmp(offset, length);                              \
     Label done;                                          \
-    __ B(hs, &done);                                     \
+    ASSEMBLE_BOUNDS_CHECK(offset, length, &done);        \
     __ Str(value, MemOperand(buffer, offset, UXTW));     \
     __ Bind(&done);                                      \
   } while (0)
@@ -461,9 +467,8 @@
     auto offset = i.InputRegister32(1);                    \
     auto length = i.InputOperand32(2);                     \
     auto value = i.InputOrZeroRegister32(3);               \
-    __ Cmp(offset, length);                                \
     Label done;                                            \
-    __ B(hs, &done);                                       \
+    ASSEMBLE_BOUNDS_CHECK(offset, length, &done);          \
     __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
     __ Bind(&done);                                        \
   } while (0)
@@ -474,9 +479,8 @@
     auto offset = i.InputRegister32(1);                    \
     auto length = i.InputOperand32(2);                     \
     auto value = i.InputOrZeroRegister64(3);               \
-    __ Cmp(offset, length);                                \
     Label done;                                            \
-    __ B(hs, &done);                                       \
+    ASSEMBLE_BOUNDS_CHECK(offset, length, &done);          \
     __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
     __ Bind(&done);                                        \
   } while (0)
@@ -509,6 +513,20 @@
     __ Dmb(InnerShareable, BarrierAll);                               \
   } while (0)
 
+#define ASSEMBLE_IEEE754_BINOP(name)                                          \
+  do {                                                                        \
+    FrameScope scope(masm(), StackFrame::MANUAL);                             \
+    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+                     0, 2);                                                   \
+  } while (0)
+
+#define ASSEMBLE_IEEE754_UNOP(name)                                           \
+  do {                                                                        \
+    FrameScope scope(masm(), StackFrame::MANUAL);                             \
+    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+                     0, 1);                                                   \
+  } while (0)
+
 void CodeGenerator::AssembleDeconstructFrame() {
   const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   if (descriptor->IsCFunctionCall() || descriptor->UseNativeStack()) {
@@ -711,6 +729,14 @@
     case kArchLookupSwitch:
       AssembleArchLookupSwitch(instr);
       break;
+    case kArchDebugBreak:
+      __ Debug("kArchDebugBreak", 0, BREAK);
+      break;
+    case kArchComment: {
+      Address comment_string = i.InputExternalReference(0).address();
+      __ RecordComment(reinterpret_cast<const char*>(comment_string));
+      break;
+    }
     case kArchNop:
     case kArchThrowTerminator:
       // don't emit code for nops.
@@ -781,6 +807,45 @@
       __ Add(i.OutputRegister(0), base, Operand(offset.offset()));
       break;
     }
+    case kIeee754Float64Atan:
+      ASSEMBLE_IEEE754_UNOP(atan);
+      break;
+    case kIeee754Float64Atan2:
+      ASSEMBLE_IEEE754_BINOP(atan2);
+      break;
+    case kIeee754Float64Cos:
+      ASSEMBLE_IEEE754_UNOP(cos);
+      break;
+    case kIeee754Float64Cbrt:
+      ASSEMBLE_IEEE754_UNOP(cbrt);
+      break;
+    case kIeee754Float64Exp:
+      ASSEMBLE_IEEE754_UNOP(exp);
+      break;
+    case kIeee754Float64Expm1:
+      ASSEMBLE_IEEE754_UNOP(expm1);
+      break;
+    case kIeee754Float64Atanh:
+      ASSEMBLE_IEEE754_UNOP(atanh);
+      break;
+    case kIeee754Float64Log:
+      ASSEMBLE_IEEE754_UNOP(log);
+      break;
+    case kIeee754Float64Log1p:
+      ASSEMBLE_IEEE754_UNOP(log1p);
+      break;
+    case kIeee754Float64Log2:
+      ASSEMBLE_IEEE754_UNOP(log2);
+      break;
+    case kIeee754Float64Log10:
+      ASSEMBLE_IEEE754_UNOP(log10);
+      break;
+    case kIeee754Float64Sin:
+      ASSEMBLE_IEEE754_UNOP(sin);
+      break;
+    case kIeee754Float64Tan:
+      ASSEMBLE_IEEE754_UNOP(tan);
+      break;
     case kArm64Float32RoundDown:
       __ Frintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
@@ -1035,6 +1100,7 @@
       // Pseudo instructions turned into tbz/tbnz in AssembleArchBranch.
       break;
     case kArm64CompareAndBranch32:
+    case kArm64CompareAndBranch:
       // Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
       break;
     case kArm64ClaimCSP: {
@@ -1180,6 +1246,9 @@
     case kArm64Float32Abs:
       __ Fabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
+    case kArm64Float32Neg:
+      __ Fneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
+      break;
     case kArm64Float32Sqrt:
       __ Fsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
@@ -1357,6 +1426,9 @@
     case kArm64Float64MoveU64:
       __ Fmov(i.OutputFloat64Register(), i.InputRegister(0));
       break;
+    case kArm64Float64SilenceNaN:
+      __ CanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
     case kArm64U64MoveFloat64:
       __ Fmov(i.OutputRegister(), i.InputDoubleRegister(0));
       break;
@@ -1497,6 +1569,17 @@
       default:
         UNREACHABLE();
     }
+  } else if (opcode == kArm64CompareAndBranch) {
+    switch (condition) {
+      case kEqual:
+        __ Cbz(i.InputRegister64(0), tlabel);
+        break;
+      case kNotEqual:
+        __ Cbnz(i.InputRegister64(0), tlabel);
+        break;
+      default:
+        UNREACHABLE();
+    }
   } else if (opcode == kArm64TestAndBranch32) {
     switch (condition) {
       case kEqual:
diff --git a/src/compiler/arm64/instruction-codes-arm64.h b/src/compiler/arm64/instruction-codes-arm64.h
index f03c2fb..2b5fe33 100644
--- a/src/compiler/arm64/instruction-codes-arm64.h
+++ b/src/compiler/arm64/instruction-codes-arm64.h
@@ -78,6 +78,7 @@
   V(Arm64TestAndBranch32)          \
   V(Arm64TestAndBranch)            \
   V(Arm64CompareAndBranch32)       \
+  V(Arm64CompareAndBranch)         \
   V(Arm64ClaimCSP)                 \
   V(Arm64ClaimJSSP)                \
   V(Arm64PokeCSP)                  \
@@ -91,6 +92,7 @@
   V(Arm64Float32Max)               \
   V(Arm64Float32Min)               \
   V(Arm64Float32Abs)               \
+  V(Arm64Float32Neg)               \
   V(Arm64Float32Sqrt)              \
   V(Arm64Float32RoundDown)         \
   V(Arm64Float64Cmp)               \
@@ -112,6 +114,7 @@
   V(Arm64Float64RoundTruncate)     \
   V(Arm64Float32RoundTiesEven)     \
   V(Arm64Float64RoundTiesEven)     \
+  V(Arm64Float64SilenceNaN)        \
   V(Arm64Float32ToFloat64)         \
   V(Arm64Float64ToFloat32)         \
   V(Arm64Float32ToInt32)           \
diff --git a/src/compiler/arm64/instruction-scheduler-arm64.cc b/src/compiler/arm64/instruction-scheduler-arm64.cc
index 4320d56..f3797c2 100644
--- a/src/compiler/arm64/instruction-scheduler-arm64.cc
+++ b/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -85,6 +85,7 @@
     case kArm64Float32Max:
     case kArm64Float32Min:
     case kArm64Float32Abs:
+    case kArm64Float32Neg:
     case kArm64Float32Sqrt:
     case kArm64Float32RoundDown:
     case kArm64Float64Cmp:
@@ -130,11 +131,13 @@
     case kArm64Float64InsertHighWord32:
     case kArm64Float64MoveU64:
     case kArm64U64MoveFloat64:
+    case kArm64Float64SilenceNaN:
       return kNoOpcodeFlags;
 
     case kArm64TestAndBranch32:
     case kArm64TestAndBranch:
     case kArm64CompareAndBranch32:
+    case kArm64CompareAndBranch:
       return kIsBlockTerminator;
 
     case kArm64LdrS:
@@ -291,6 +294,7 @@
 
     case kArm64Float32Abs:
     case kArm64Float32Cmp:
+    case kArm64Float32Neg:
     case kArm64Float64Abs:
     case kArm64Float64Cmp:
     case kArm64Float64Neg:
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 240a4f2..637acac 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -256,36 +256,96 @@
   }
 }
 
+// Bitfields describing binary operator properties:
+// CanCommuteField is true if we can switch the two operands, potentially
+// requiring commuting the flags continuation condition.
+typedef BitField8<bool, 1, 1> CanCommuteField;
+// MustCommuteCondField is true when we need to commute the flags continuation
+// condition in order to switch the operands.
+typedef BitField8<bool, 2, 1> MustCommuteCondField;
+// IsComparisonField is true when the operation is a comparison and has no other
+// result other than the condition.
+typedef BitField8<bool, 3, 1> IsComparisonField;
+// IsAddSubField is true when an instruction is encoded as ADD or SUB.
+typedef BitField8<bool, 4, 1> IsAddSubField;
+
+// Get properties of a binary operator.
+uint8_t GetBinopProperties(InstructionCode opcode) {
+  uint8_t result = 0;
+  switch (opcode) {
+    case kArm64Cmp32:
+    case kArm64Cmp:
+      // We can commute CMP by switching the inputs and commuting
+      // the flags continuation.
+      result = CanCommuteField::update(result, true);
+      result = MustCommuteCondField::update(result, true);
+      result = IsComparisonField::update(result, true);
+      // The CMP and CMN instructions are encoded as SUB or ADD
+      // with zero output register, and therefore support the same
+      // operand modes.
+      result = IsAddSubField::update(result, true);
+      break;
+    case kArm64Cmn32:
+    case kArm64Cmn:
+      result = CanCommuteField::update(result, true);
+      result = IsComparisonField::update(result, true);
+      result = IsAddSubField::update(result, true);
+      break;
+    case kArm64Add32:
+    case kArm64Add:
+      result = CanCommuteField::update(result, true);
+      result = IsAddSubField::update(result, true);
+      break;
+    case kArm64Sub32:
+    case kArm64Sub:
+      result = IsAddSubField::update(result, true);
+      break;
+    case kArm64Tst32:
+    case kArm64Tst:
+      result = CanCommuteField::update(result, true);
+      result = IsComparisonField::update(result, true);
+      break;
+    case kArm64And32:
+    case kArm64And:
+    case kArm64Or32:
+    case kArm64Or:
+    case kArm64Eor32:
+    case kArm64Eor:
+      result = CanCommuteField::update(result, true);
+      break;
+    default:
+      UNREACHABLE();
+      return 0;
+  }
+  DCHECK_IMPLIES(MustCommuteCondField::decode(result),
+                 CanCommuteField::decode(result));
+  return result;
+}
+
 // Shared routine for multiple binary operations.
 template <typename Matcher>
 void VisitBinop(InstructionSelector* selector, Node* node,
                 InstructionCode opcode, ImmediateMode operand_mode,
                 FlagsContinuation* cont) {
   Arm64OperandGenerator g(selector);
-  Matcher m(node);
   InstructionOperand inputs[5];
   size_t input_count = 0;
   InstructionOperand outputs[2];
   size_t output_count = 0;
-  bool is_cmp = (opcode == kArm64Cmp32) || (opcode == kArm64Cmn32);
 
-  // We can commute cmp by switching the inputs and commuting the flags
-  // continuation.
-  bool can_commute = m.HasProperty(Operator::kCommutative) || is_cmp;
+  Node* left_node = node->InputAt(0);
+  Node* right_node = node->InputAt(1);
 
-  // The cmp and cmn instructions are encoded as sub or add with zero output
-  // register, and therefore support the same operand modes.
-  bool is_add_sub = m.IsInt32Add() || m.IsInt64Add() || m.IsInt32Sub() ||
-                    m.IsInt64Sub() || is_cmp;
-
-  Node* left_node = m.left().node();
-  Node* right_node = m.right().node();
+  uint8_t properties = GetBinopProperties(opcode);
+  bool can_commute = CanCommuteField::decode(properties);
+  bool must_commute_cond = MustCommuteCondField::decode(properties);
+  bool is_add_sub = IsAddSubField::decode(properties);
 
   if (g.CanBeImmediate(right_node, operand_mode)) {
     inputs[input_count++] = g.UseRegister(left_node);
     inputs[input_count++] = g.UseImmediate(right_node);
-  } else if (is_cmp && g.CanBeImmediate(left_node, operand_mode)) {
-    cont->Commute();
+  } else if (can_commute && g.CanBeImmediate(left_node, operand_mode)) {
+    if (must_commute_cond) cont->Commute();
     inputs[input_count++] = g.UseRegister(right_node);
     inputs[input_count++] = g.UseImmediate(left_node);
   } else if (is_add_sub &&
@@ -295,7 +355,7 @@
   } else if (is_add_sub && can_commute &&
              TryMatchAnyExtend(&g, selector, node, right_node, left_node,
                                &inputs[0], &inputs[1], &opcode)) {
-    if (is_cmp) cont->Commute();
+    if (must_commute_cond) cont->Commute();
     input_count += 2;
   } else if (TryMatchAnyShift(selector, node, right_node, &opcode,
                               !is_add_sub)) {
@@ -305,7 +365,7 @@
     inputs[input_count++] = g.UseImmediate(m_shift.right().node());
   } else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
                                              !is_add_sub)) {
-    if (is_cmp) cont->Commute();
+    if (must_commute_cond) cont->Commute();
     Matcher m_shift(left_node);
     inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
     inputs[input_count++] = g.UseRegister(m_shift.left().node());
@@ -320,7 +380,7 @@
     inputs[input_count++] = g.Label(cont->false_block());
   }
 
-  if (!is_cmp) {
+  if (!IsComparisonField::decode(properties)) {
     outputs[output_count++] = g.DefineAsRegister(node);
   }
 
@@ -329,7 +389,7 @@
   }
 
   DCHECK_NE(0u, input_count);
-  DCHECK((output_count != 0) || is_cmp);
+  DCHECK((output_count != 0) || IsComparisonField::decode(properties));
   DCHECK_GE(arraysize(inputs), input_count);
   DCHECK_GE(arraysize(outputs), output_count);
 
@@ -593,6 +653,17 @@
       UNREACHABLE();
       return;
   }
+  // If the length is a constant power of two, allow the code generator to
+  // pick a more efficient bounds check sequence by passing the length as an
+  // immediate.
+  if (length->opcode() == IrOpcode::kInt32Constant) {
+    Int32Matcher m(length);
+    if (m.IsPowerOf2()) {
+      Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
+           g.UseRegister(offset), g.UseImmediate(length));
+      return;
+    }
+  }
   Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
        g.UseRegister(offset), g.UseOperand(length, kArithmeticImm));
 }
@@ -632,6 +703,17 @@
       UNREACHABLE();
       return;
   }
+  // If the length is a constant power of two, allow the code generator to
+  // pick a more efficient bounds check sequence by passing the length as an
+  // immediate.
+  if (length->opcode() == IrOpcode::kInt32Constant) {
+    Int32Matcher m(length);
+    if (m.IsPowerOf2()) {
+      Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
+           g.UseImmediate(length), g.UseRegisterOrImmediateZero(value));
+      return;
+    }
+  }
   Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
        g.UseOperand(length, kArithmeticImm),
        g.UseRegisterOrImmediateZero(value));
@@ -1665,7 +1747,6 @@
   VisitRR(this, kArm64Float64Abs, node);
 }
 
-
 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
   VisitRR(this, kArm64Float32Sqrt, node);
 }
@@ -1720,6 +1801,28 @@
   VisitRR(this, kArm64Float64RoundTiesEven, node);
 }
 
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+  VisitRR(this, kArm64Float32Neg, node);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+  VisitRR(this, kArm64Float64Neg, node);
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+                                                   InstructionCode opcode) {
+  Arm64OperandGenerator g(this);
+  Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
+       g.UseFixed(node->InputAt(1), d1))
+      ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+                                                  InstructionCode opcode) {
+  Arm64OperandGenerator g(this);
+  Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0))
+      ->MarkAsCall();
+}
 
 void InstructionSelector::EmitPrepareArguments(
     ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1853,6 +1956,23 @@
   VisitWordTest(selector, node, kArm64Tst, cont);
 }
 
+template <typename Matcher, ArchOpcode kOpcode>
+bool TryEmitTestAndBranch(InstructionSelector* selector, Node* node,
+                          FlagsContinuation* cont) {
+  Arm64OperandGenerator g(selector);
+  Matcher m(node);
+  if (cont->IsBranch() && m.right().HasValue() &&
+      (base::bits::CountPopulation(m.right().Value()) == 1)) {
+    // If the mask has only one bit set, we can use tbz/tbnz.
+    DCHECK((cont->condition() == kEqual) || (cont->condition() == kNotEqual));
+    selector->Emit(
+        cont->Encode(kOpcode), g.NoOutput(), g.UseRegister(m.left().node()),
+        g.TempImmediate(base::bits::CountTrailingZeros(m.right().Value())),
+        g.Label(cont->true_block()), g.Label(cont->false_block()));
+    return true;
+  }
+  return false;
+}
 
 // Shared routine for multiple float32 compare operations.
 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
@@ -1897,6 +2017,8 @@
   while (selector->CanCover(user, value)) {
     switch (value->opcode()) {
       case IrOpcode::kWord32Equal: {
+        // Combine with comparisons against 0 by simply inverting the
+        // continuation.
         Int32BinopMatcher m(value);
         if (m.right().Is(0)) {
           user = value;
@@ -1919,10 +2041,33 @@
       case IrOpcode::kUint32LessThanOrEqual:
         cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
         return VisitWord32Compare(selector, value, cont);
-      case IrOpcode::kWord64Equal:
+      case IrOpcode::kWord64Equal: {
         cont->OverwriteAndNegateIfEqual(kEqual);
+        Int64BinopMatcher m(value);
+        if (m.right().Is(0)) {
+          Node* const left = m.left().node();
+          if (selector->CanCover(value, left) &&
+              left->opcode() == IrOpcode::kWord64And) {
+            // Attempt to merge the Word64Equal(Word64And(x, y), 0) comparison
+            // into a tbz/tbnz instruction.
+            if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
+                    selector, left, cont)) {
+              return;
+            }
+            return VisitWordCompare(selector, left, kArm64Tst, cont, true,
+                                    kLogical64Imm);
+          }
+          // Merge the Word64Equal(x, 0) comparison into a cbz instruction.
+          if (cont->IsBranch()) {
+            selector->Emit(cont->Encode(kArm64CompareAndBranch), g.NoOutput(),
+                           g.UseRegister(left), g.Label(cont->true_block()),
+                           g.Label(cont->false_block()));
+            return;
+          }
+        }
         return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                 kArithmeticImm);
+      }
       case IrOpcode::kInt64LessThan:
         cont->OverwriteAndNegateIfEqual(kSignedLessThan);
         return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
@@ -1997,42 +2142,20 @@
                                 kArithmeticImm);
       case IrOpcode::kInt32Sub:
         return VisitWord32Compare(selector, value, cont);
-      case IrOpcode::kWord32And: {
-        Int32BinopMatcher m(value);
-        if (cont->IsBranch() && m.right().HasValue() &&
-            (base::bits::CountPopulation32(m.right().Value()) == 1)) {
-          // If the mask has only one bit set, we can use tbz/tbnz.
-          DCHECK((cont->condition() == kEqual) ||
-                 (cont->condition() == kNotEqual));
-          selector->Emit(
-              cont->Encode(kArm64TestAndBranch32), g.NoOutput(),
-              g.UseRegister(m.left().node()),
-              g.TempImmediate(
-                  base::bits::CountTrailingZeros32(m.right().Value())),
-              g.Label(cont->true_block()), g.Label(cont->false_block()));
+      case IrOpcode::kWord32And:
+        if (TryEmitTestAndBranch<Uint32BinopMatcher, kArm64TestAndBranch32>(
+                selector, value, cont)) {
           return;
         }
         return VisitWordCompare(selector, value, kArm64Tst32, cont, true,
                                 kLogical32Imm);
-      }
-      case IrOpcode::kWord64And: {
-        Int64BinopMatcher m(value);
-        if (cont->IsBranch() && m.right().HasValue() &&
-            (base::bits::CountPopulation64(m.right().Value()) == 1)) {
-          // If the mask has only one bit set, we can use tbz/tbnz.
-          DCHECK((cont->condition() == kEqual) ||
-                 (cont->condition() == kNotEqual));
-          selector->Emit(
-              cont->Encode(kArm64TestAndBranch), g.NoOutput(),
-              g.UseRegister(m.left().node()),
-              g.TempImmediate(
-                  base::bits::CountTrailingZeros64(m.right().Value())),
-              g.Label(cont->true_block()), g.Label(cont->false_block()));
+      case IrOpcode::kWord64And:
+        if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
+                selector, value, cont)) {
           return;
         }
         return VisitWordCompare(selector, value, kArm64Tst, cont, true,
                                 kLogical64Imm);
-      }
       default:
         break;
     }
@@ -2338,6 +2461,10 @@
        g.UseRegister(left), g.UseRegister(right));
 }
 
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+  VisitRR(this, kArm64Float64SilenceNaN, node);
+}
+
 void InstructionSelector::VisitAtomicLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   Arm64OperandGenerator g(this);
@@ -2414,7 +2541,16 @@
          MachineOperatorBuilder::kInt32DivIsSafe |
          MachineOperatorBuilder::kUint32DivIsSafe |
          MachineOperatorBuilder::kWord32ReverseBits |
-         MachineOperatorBuilder::kWord64ReverseBits;
+         MachineOperatorBuilder::kWord64ReverseBits |
+         MachineOperatorBuilder::kFloat32Neg |
+         MachineOperatorBuilder::kFloat64Neg;
+}
+
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+  return MachineOperatorBuilder::AlignmentRequirements::
+      FullUnalignedAccessSupport();
 }
 
 }  // namespace compiler
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
index da8b626..d8d60f3 100644
--- a/src/compiler/ast-graph-builder.cc
+++ b/src/compiler/ast-graph-builder.cc
@@ -281,9 +281,9 @@
     return NewPathToken(TokenDispenserForFinally::kFallThroughToken);
   }
   Node* NewPathDispatchCondition(Node* t1, Node* t2) {
-    // TODO(mstarzinger): This should be machine()->WordEqual(), but our Phi
-    // nodes all have kRepTagged|kTypeAny, which causes representation mismatch.
-    return owner_->NewNode(owner_->javascript()->StrictEqual(), t1, t2);
+    return owner_->NewNode(
+        owner_->javascript()->StrictEqual(CompareOperationHints::Any()), t1,
+        t2);
   }
 
  private:
@@ -416,8 +416,15 @@
   FrameStateBeforeAndAfter(AstGraphBuilder* builder, BailoutId id_before)
       : builder_(builder), frame_state_before_(nullptr) {
     frame_state_before_ = id_before == BailoutId::None()
-                              ? builder_->jsgraph()->EmptyFrameState()
+                              ? builder_->GetEmptyFrameState()
                               : builder_->environment()->Checkpoint(id_before);
+    if (id_before != BailoutId::None()) {
+      // Create an explicit checkpoint node for before the operation.
+      Node* node = builder_->NewNode(builder_->common()->Checkpoint());
+      DCHECK_EQ(IrOpcode::kDead,
+                NodeProperties::GetFrameStateInput(node, 0)->opcode());
+      NodeProperties::ReplaceFrameStateInput(node, 0, frame_state_before_);
+    }
   }
 
   void AddToNode(
@@ -435,7 +442,7 @@
 
       Node* frame_state_after =
           id_after == BailoutId::None()
-              ? builder_->jsgraph()->EmptyFrameState()
+              ? builder_->GetEmptyFrameState()
               : builder_->environment()->Checkpoint(id_after, combine,
                                                     node_has_exception);
 
@@ -444,6 +451,7 @@
 
     if (count >= 2) {
       // Add the frame state for before the operation.
+      // TODO(mstarzinger): Get rid of frame state input before!
       DCHECK_EQ(IrOpcode::kDead,
                 NodeProperties::GetFrameStateInput(node, 1)->opcode());
       NodeProperties::ReplaceFrameStateInput(node, 1, frame_state_before_);
@@ -539,6 +547,18 @@
   return new_target_.get();
 }
 
+Node* AstGraphBuilder::GetEmptyFrameState() {
+  if (!empty_frame_state_.is_set()) {
+    const Operator* op = common()->FrameState(
+        BailoutId::None(), OutputFrameStateCombine::Ignore(), nullptr);
+    Node* node = graph()->NewNode(
+        op, jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
+        jsgraph()->EmptyStateValues(), jsgraph()->NoContextConstant(),
+        jsgraph()->UndefinedConstant(), graph()->start());
+    empty_frame_state_.set(node);
+  }
+  return empty_frame_state_.get();
+}
 
 bool AstGraphBuilder::CreateGraph(bool stack_check) {
   Scope* scope = info()->scope();
@@ -875,7 +895,7 @@
                                                OutputFrameStateCombine combine,
                                                bool owner_has_exception) {
   if (!builder()->info()->is_deoptimization_enabled()) {
-    return builder()->jsgraph()->EmptyFrameState();
+    return builder()->GetEmptyFrameState();
   }
 
   UpdateStateValues(&parameters_node_, 0, parameters_count());
@@ -1112,18 +1132,10 @@
       }
       break;
     case VariableLocation::LOOKUP: {
+      DCHECK(!hole_init);
       Node* name = jsgraph()->Constant(variable->name());
-      // For variables we must not push an initial value (such as 'undefined')
-      // because we may have a (legal) redeclaration and we must not destroy
-      // the current value.
-      Node* value =
-          hole_init ? jsgraph()->TheHoleConstant()
-                    : jsgraph()->ZeroConstant();  // Indicates no initial value.
-      Node* attr =
-          jsgraph()->Constant(variable->DeclarationPropertyAttributes());
-      const Operator* op =
-          javascript()->CallRuntime(Runtime::kDeclareLookupSlot);
-      Node* store = NewNode(op, name, value, attr);
+      const Operator* op = javascript()->CallRuntime(Runtime::kDeclareEvalVar);
+      Node* store = NewNode(op, name);
       PrepareFrameState(store, decl->proxy()->id());
       break;
     }
@@ -1162,11 +1174,9 @@
       VisitForValue(decl->fun());
       Node* value = environment()->Pop();
       Node* name = jsgraph()->Constant(variable->name());
-      Node* attr =
-          jsgraph()->Constant(variable->DeclarationPropertyAttributes());
       const Operator* op =
-          javascript()->CallRuntime(Runtime::kDeclareLookupSlot);
-      Node* store = NewNode(op, name, value, attr);
+          javascript()->CallRuntime(Runtime::kDeclareEvalFunction);
+      Node* store = NewNode(op, name, value);
       PrepareFrameState(store, decl->proxy()->id());
       break;
     }
@@ -1289,7 +1299,15 @@
     VisitForValue(clause->label());
     Node* label = environment()->Pop();
     Node* tag = environment()->Top();
-    const Operator* op = javascript()->StrictEqual();
+
+    CompareOperationHints hints;
+    if (!type_hint_analysis_ ||
+        !type_hint_analysis_->GetCompareOperationHints(clause->CompareId(),
+                                                       &hints)) {
+      hints = CompareOperationHints::Any();
+    }
+
+    const Operator* op = javascript()->StrictEqual(hints);
     Node* condition = NewNode(op, tag, label);
     compare_switch.BeginLabel(i, condition);
 
@@ -1365,10 +1383,12 @@
   for_block.BeginBlock();
   // Check for null or undefined before entering loop.
   Node* is_null_cond =
-      NewNode(javascript()->StrictEqual(), object, jsgraph()->NullConstant());
+      NewNode(javascript()->StrictEqual(CompareOperationHints::Any()), object,
+              jsgraph()->NullConstant());
   for_block.BreakWhen(is_null_cond, BranchHint::kFalse);
-  Node* is_undefined_cond = NewNode(javascript()->StrictEqual(), object,
-                                    jsgraph()->UndefinedConstant());
+  Node* is_undefined_cond =
+      NewNode(javascript()->StrictEqual(CompareOperationHints::Any()), object,
+              jsgraph()->UndefinedConstant());
   for_block.BreakWhen(is_undefined_cond, BranchHint::kFalse);
   {
     // Convert object to jsobject.
@@ -1411,8 +1431,9 @@
       PrepareFrameState(value, stmt->FilterId(),
                         OutputFrameStateCombine::Push());
       IfBuilder test_value(this);
-      Node* test_value_cond = NewNode(javascript()->StrictEqual(), value,
-                                      jsgraph()->UndefinedConstant());
+      Node* test_value_cond =
+          NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+                  value, jsgraph()->UndefinedConstant());
       test_value.If(test_value_cond, BranchHint::kFalse);
       test_value.Then();
       test_value.Else();
@@ -1602,12 +1623,12 @@
   environment()->Push(literal);
 
   // Load the "prototype" from the constructor.
-  FrameStateBeforeAndAfter states(this, expr->CreateLiteralId());
+  PrepareEagerCheckpoint(expr->CreateLiteralId());
   Handle<Name> name = isolate()->factory()->prototype_string();
   VectorSlotPair pair = CreateVectorSlotPair(expr->PrototypeSlot());
   Node* prototype = BuildNamedLoad(literal, name, pair);
-  states.AddToNode(prototype, expr->PrototypeId(),
-                   OutputFrameStateCombine::Push());
+  PrepareFrameState(prototype, expr->PrototypeId(),
+                    OutputFrameStateCombine::Push());
   environment()->Push(prototype);
 
   // Create nodes to store method values into the literal.
@@ -1647,7 +1668,8 @@
             jsgraph()->Constant(property->NeedsSetFunctionName());
         const Operator* op =
             javascript()->CallRuntime(Runtime::kDefineDataPropertyInLiteral);
-        NewNode(op, receiver, key, value, attr, set_function_name);
+        Node* call = NewNode(op, receiver, key, value, attr, set_function_name);
+        PrepareFrameState(call, BailoutId::None());
         break;
       }
       case ObjectLiteral::Property::GETTER: {
@@ -1676,12 +1698,11 @@
   // Assign to class variable.
   if (expr->class_variable_proxy() != nullptr) {
     Variable* var = expr->class_variable_proxy()->var();
-    FrameStateBeforeAndAfter states(this, BailoutId::None());
     VectorSlotPair feedback = CreateVectorSlotPair(
         expr->NeedsProxySlot() ? expr->ProxySlot()
                                : FeedbackVectorSlot::Invalid());
     BuildVariableAssignment(var, literal, Token::INIT, feedback,
-                            BailoutId::None(), states);
+                            BailoutId::None());
   }
   ast_context()->ProduceValue(literal);
 }
@@ -1715,8 +1736,8 @@
 
 void AstGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
   VectorSlotPair pair = CreateVectorSlotPair(expr->VariableFeedbackSlot());
-  FrameStateBeforeAndAfter states(this, BeforeId(expr));
-  Node* value = BuildVariableLoad(expr->var(), expr->id(), states, pair,
+  PrepareEagerCheckpoint(BeforeId(expr));
+  Node* value = BuildVariableLoad(expr->var(), expr->id(), pair,
                                   ast_context()->GetStateCombine());
   ast_context()->ProduceValue(value);
 }
@@ -1776,15 +1797,15 @@
         if (key->value()->IsInternalizedString()) {
           if (property->emit_store()) {
             VisitForValue(property->value());
-            FrameStateBeforeAndAfter states(this, property->value()->id());
+            PrepareEagerCheckpoint(property->value()->id());
             Node* value = environment()->Pop();
             Node* literal = environment()->Top();
             Handle<Name> name = key->AsPropertyName();
             VectorSlotPair feedback =
                 CreateVectorSlotPair(property->GetSlot(0));
             Node* store = BuildNamedStore(literal, name, value, feedback);
-            states.AddToNode(store, key->id(),
-                             OutputFrameStateCombine::Ignore());
+            PrepareFrameState(store, key->id(),
+                              OutputFrameStateCombine::Ignore());
             BuildSetHomeObject(value, literal, property, 1);
           } else {
             VisitForEffect(property->value());
@@ -1823,12 +1844,16 @@
       }
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->getter = property;
+          AccessorTable::Iterator it = accessor_table.lookup(key);
+          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->setter = property;
+          AccessorTable::Iterator it = accessor_table.lookup(key);
+          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->setter = property;
         }
         break;
     }
@@ -1849,8 +1874,7 @@
     const Operator* op =
         javascript()->CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
     Node* call = NewNode(op, literal, name, getter, setter, attr);
-    // This should not lazy deopt on a new literal.
-    PrepareFrameState(call, BailoutId::None());
+    PrepareFrameState(call, it->second->bailout_id);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1896,7 +1920,8 @@
             jsgraph()->Constant(property->NeedsSetFunctionName());
         const Operator* op =
             javascript()->CallRuntime(Runtime::kDefineDataPropertyInLiteral);
-        NewNode(op, receiver, key, value, attr, set_function_name);
+        Node* call = NewNode(op, receiver, key, value, attr, set_function_name);
+        PrepareFrameState(call, expr->GetIdForPropertySet(property_index));
         break;
       }
       case ObjectLiteral::Property::PROTOTYPE:
@@ -1961,14 +1986,14 @@
 
     VisitForValue(subexpr);
     {
-      FrameStateBeforeAndAfter states(this, subexpr->id());
+      PrepareEagerCheckpoint(subexpr->id());
       VectorSlotPair pair = CreateVectorSlotPair(expr->LiteralFeedbackSlot());
       Node* value = environment()->Pop();
       Node* index = jsgraph()->Constant(array_index);
       Node* literal = environment()->Top();
       Node* store = BuildKeyedStore(literal, index, value, pair);
-      states.AddToNode(store, expr->GetIdForElement(array_index),
-                       OutputFrameStateCombine::Ignore());
+      PrepareFrameState(store, expr->GetIdForElement(array_index),
+                        OutputFrameStateCombine::Ignore());
     }
   }
 
@@ -2011,49 +2036,49 @@
     case VARIABLE: {
       Variable* var = expr->AsVariableProxy()->var();
       environment()->Push(value);
-      FrameStateBeforeAndAfter states(this, bailout_id_before);
+      PrepareEagerCheckpoint(bailout_id_before);
       value = environment()->Pop();
       BuildVariableAssignment(var, value, Token::ASSIGN, feedback,
-                              bailout_id_after, states);
+                              bailout_id_after);
       break;
     }
     case NAMED_PROPERTY: {
       environment()->Push(value);
       VisitForValue(property->obj());
-      FrameStateBeforeAndAfter states(this, property->obj()->id());
+      PrepareEagerCheckpoint(property->obj()->id());
       Node* object = environment()->Pop();
       value = environment()->Pop();
       Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
       Node* store = BuildNamedStore(object, name, value, feedback);
-      states.AddToNode(store, bailout_id_after,
-                       OutputFrameStateCombine::Ignore());
+      PrepareFrameState(store, bailout_id_after,
+                        OutputFrameStateCombine::Ignore());
       break;
     }
     case KEYED_PROPERTY: {
       environment()->Push(value);
       VisitForValue(property->obj());
       VisitForValue(property->key());
-      FrameStateBeforeAndAfter states(this, property->key()->id());
+      PrepareEagerCheckpoint(property->key()->id());
       Node* key = environment()->Pop();
       Node* object = environment()->Pop();
       value = environment()->Pop();
       Node* store = BuildKeyedStore(object, key, value, feedback);
-      states.AddToNode(store, bailout_id_after,
-                       OutputFrameStateCombine::Ignore());
+      PrepareFrameState(store, bailout_id_after,
+                        OutputFrameStateCombine::Ignore());
       break;
     }
     case NAMED_SUPER_PROPERTY: {
       environment()->Push(value);
       VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
       VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
-      FrameStateBeforeAndAfter states(this, property->obj()->id());
+      PrepareEagerCheckpoint(property->obj()->id());
       Node* home_object = environment()->Pop();
       Node* receiver = environment()->Pop();
       value = environment()->Pop();
       Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
       Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
-      states.AddToNode(store, bailout_id_after,
-                       OutputFrameStateCombine::Ignore());
+      PrepareFrameState(store, bailout_id_after,
+                        OutputFrameStateCombine::Ignore());
       break;
     }
     case KEYED_SUPER_PROPERTY: {
@@ -2061,14 +2086,14 @@
       VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
       VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
       VisitForValue(property->key());
-      FrameStateBeforeAndAfter states(this, property->key()->id());
+      PrepareEagerCheckpoint(property->key()->id());
       Node* key = environment()->Pop();
       Node* home_object = environment()->Pop();
       Node* receiver = environment()->Pop();
       value = environment()->Pop();
       Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
-      states.AddToNode(store, bailout_id_after,
-                       OutputFrameStateCombine::Ignore());
+      PrepareFrameState(store, bailout_id_after,
+                        OutputFrameStateCombine::Ignore());
       break;
     }
   }
@@ -2122,10 +2147,9 @@
         VariableProxy* proxy = expr->target()->AsVariableProxy();
         VectorSlotPair pair =
             CreateVectorSlotPair(proxy->VariableFeedbackSlot());
-        FrameStateBeforeAndAfter states(this, BeforeId(proxy));
-        old_value =
-            BuildVariableLoad(proxy->var(), expr->target()->id(), states, pair,
-                              OutputFrameStateCombine::Push());
+        PrepareEagerCheckpoint(BeforeId(proxy));
+        old_value = BuildVariableLoad(proxy->var(), expr->target()->id(), pair,
+                                      OutputFrameStateCombine::Push());
         break;
       }
       case NAMED_PROPERTY: {
@@ -2133,10 +2157,10 @@
         Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
         VectorSlotPair pair =
             CreateVectorSlotPair(property->PropertyFeedbackSlot());
-        FrameStateBeforeAndAfter states(this, property->obj()->id());
+        PrepareEagerCheckpoint(property->obj()->id());
         old_value = BuildNamedLoad(object, name, pair);
-        states.AddToNode(old_value, property->LoadId(),
-                         OutputFrameStateCombine::Push());
+        PrepareFrameState(old_value, property->LoadId(),
+                          OutputFrameStateCombine::Push());
         break;
       }
       case KEYED_PROPERTY: {
@@ -2144,10 +2168,10 @@
         Node* object = environment()->Peek(1);
         VectorSlotPair pair =
             CreateVectorSlotPair(property->PropertyFeedbackSlot());
-        FrameStateBeforeAndAfter states(this, property->key()->id());
+        PrepareEagerCheckpoint(property->key()->id());
         old_value = BuildKeyedLoad(object, key, pair);
-        states.AddToNode(old_value, property->LoadId(),
-                         OutputFrameStateCombine::Push());
+        PrepareFrameState(old_value, property->LoadId(),
+                          OutputFrameStateCombine::Push());
         break;
       }
       case NAMED_SUPER_PROPERTY: {
@@ -2156,10 +2180,10 @@
         Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
         VectorSlotPair pair =
             CreateVectorSlotPair(property->PropertyFeedbackSlot());
-        FrameStateBeforeAndAfter states(this, property->obj()->id());
+        PrepareEagerCheckpoint(property->obj()->id());
         old_value = BuildNamedSuperLoad(receiver, home_object, name, pair);
-        states.AddToNode(old_value, property->LoadId(),
-                         OutputFrameStateCombine::Push());
+        PrepareFrameState(old_value, property->LoadId(),
+                          OutputFrameStateCombine::Push());
         break;
       }
       case KEYED_SUPER_PROPERTY: {
@@ -2168,10 +2192,10 @@
         Node* receiver = environment()->Peek(2);
         VectorSlotPair pair =
             CreateVectorSlotPair(property->PropertyFeedbackSlot());
-        FrameStateBeforeAndAfter states(this, property->key()->id());
+        PrepareEagerCheckpoint(property->key()->id());
         old_value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
-        states.AddToNode(old_value, property->LoadId(),
-                         OutputFrameStateCombine::Push());
+        PrepareFrameState(old_value, property->LoadId(),
+                          OutputFrameStateCombine::Push());
         break;
       }
     }
@@ -2199,31 +2223,29 @@
     }
   }
 
-  FrameStateBeforeAndAfter store_states(this, before_store_id);
   // Store the value.
+  PrepareEagerCheckpoint(before_store_id);
   Node* value = environment()->Pop();
   VectorSlotPair feedback = CreateVectorSlotPair(expr->AssignmentSlot());
   switch (assign_type) {
     case VARIABLE: {
       Variable* variable = expr->target()->AsVariableProxy()->var();
       BuildVariableAssignment(variable, value, expr->op(), feedback, expr->id(),
-                              store_states, ast_context()->GetStateCombine());
+                              ast_context()->GetStateCombine());
       break;
     }
     case NAMED_PROPERTY: {
       Node* object = environment()->Pop();
       Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
       Node* store = BuildNamedStore(object, name, value, feedback);
-      store_states.AddToNode(store, expr->id(),
-                             ast_context()->GetStateCombine());
+      PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
       break;
     }
     case KEYED_PROPERTY: {
       Node* key = environment()->Pop();
       Node* object = environment()->Pop();
       Node* store = BuildKeyedStore(object, key, value, feedback);
-      store_states.AddToNode(store, expr->id(),
-                             ast_context()->GetStateCombine());
+      PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
       break;
     }
     case NAMED_SUPER_PROPERTY: {
@@ -2231,8 +2253,7 @@
       Node* receiver = environment()->Pop();
       Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
       Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
-      store_states.AddToNode(store, expr->id(),
-                             ast_context()->GetStateCombine());
+      PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
       break;
     }
     case KEYED_SUPER_PROPERTY: {
@@ -2240,8 +2261,7 @@
       Node* home_object = environment()->Pop();
       Node* receiver = environment()->Pop();
       Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
-      store_states.AddToNode(store, expr->id(),
-                             ast_context()->GetStateCombine());
+      PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
       break;
     }
   }
@@ -2275,44 +2295,44 @@
       break;
     case NAMED_PROPERTY: {
       VisitForValue(expr->obj());
-      FrameStateBeforeAndAfter states(this, expr->obj()->id());
+      PrepareEagerCheckpoint(expr->obj()->id());
       Node* object = environment()->Pop();
       Handle<Name> name = expr->key()->AsLiteral()->AsPropertyName();
       value = BuildNamedLoad(object, name, pair);
-      states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+      PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
       break;
     }
     case KEYED_PROPERTY: {
       VisitForValue(expr->obj());
       VisitForValue(expr->key());
-      FrameStateBeforeAndAfter states(this, expr->key()->id());
+      PrepareEagerCheckpoint(expr->key()->id());
       Node* key = environment()->Pop();
       Node* object = environment()->Pop();
       value = BuildKeyedLoad(object, key, pair);
-      states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+      PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
       break;
     }
     case NAMED_SUPER_PROPERTY: {
       VisitForValue(expr->obj()->AsSuperPropertyReference()->this_var());
       VisitForValue(expr->obj()->AsSuperPropertyReference()->home_object());
-      FrameStateBeforeAndAfter states(this, expr->obj()->id());
+      PrepareEagerCheckpoint(expr->obj()->id());
       Node* home_object = environment()->Pop();
       Node* receiver = environment()->Pop();
       Handle<Name> name = expr->key()->AsLiteral()->AsPropertyName();
       value = BuildNamedSuperLoad(receiver, home_object, name, pair);
-      states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+      PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
       break;
     }
     case KEYED_SUPER_PROPERTY: {
       VisitForValue(expr->obj()->AsSuperPropertyReference()->this_var());
       VisitForValue(expr->obj()->AsSuperPropertyReference()->home_object());
       VisitForValue(expr->key());
-      FrameStateBeforeAndAfter states(this, expr->key()->id());
+      PrepareEagerCheckpoint(expr->key()->id());
       Node* key = environment()->Pop();
       Node* home_object = environment()->Pop();
       Node* receiver = environment()->Pop();
       value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
-      states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+      PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
       break;
     }
   }
@@ -2334,10 +2354,9 @@
     case Call::GLOBAL_CALL: {
       VariableProxy* proxy = callee->AsVariableProxy();
       VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
-      FrameStateBeforeAndAfter states(this, BeforeId(proxy));
-      callee_value =
-          BuildVariableLoad(proxy->var(), expr->expression()->id(), states,
-                            pair, OutputFrameStateCombine::Push());
+      PrepareEagerCheckpoint(BeforeId(proxy));
+      callee_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
+                                       pair, OutputFrameStateCombine::Push());
       receiver_hint = ConvertReceiverMode::kNullOrUndefined;
       receiver_value = jsgraph()->UndefinedConstant();
       break;
@@ -2360,12 +2379,12 @@
       VectorSlotPair feedback =
           CreateVectorSlotPair(property->PropertyFeedbackSlot());
       VisitForValue(property->obj());
-      FrameStateBeforeAndAfter states(this, property->obj()->id());
+      PrepareEagerCheckpoint(property->obj()->id());
       Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
       Node* object = environment()->Top();
       callee_value = BuildNamedLoad(object, name, feedback);
-      states.AddToNode(callee_value, property->LoadId(),
-                       OutputFrameStateCombine::Push());
+      PrepareFrameState(callee_value, property->LoadId(),
+                        OutputFrameStateCombine::Push());
       // Note that a property call requires the receiver to be wrapped into
       // an object for sloppy callees. However the receiver is guaranteed
       // not to be null or undefined at this point.
@@ -2379,12 +2398,12 @@
           CreateVectorSlotPair(property->PropertyFeedbackSlot());
       VisitForValue(property->obj());
       VisitForValue(property->key());
-      FrameStateBeforeAndAfter states(this, property->key()->id());
+      PrepareEagerCheckpoint(property->key()->id());
       Node* key = environment()->Pop();
       Node* object = environment()->Top();
       callee_value = BuildKeyedLoad(object, key, feedback);
-      states.AddToNode(callee_value, property->LoadId(),
-                       OutputFrameStateCombine::Push());
+      PrepareFrameState(callee_value, property->LoadId(),
+                        OutputFrameStateCombine::Push());
       // Note that a property call requires the receiver to be wrapped into
       // an object for sloppy callees. However the receiver is guaranteed
       // not to be null or undefined at this point.
@@ -2401,10 +2420,10 @@
       Node* home = environment()->Peek(1);
       Node* object = environment()->Top();
       Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
-      FrameStateBeforeAndAfter states(this, property->obj()->id());
+      PrepareEagerCheckpoint(property->obj()->id());
       callee_value = BuildNamedSuperLoad(object, home, name, VectorSlotPair());
-      states.AddToNode(callee_value, property->LoadId(),
-                       OutputFrameStateCombine::Push());
+      PrepareFrameState(callee_value, property->LoadId(),
+                        OutputFrameStateCombine::Push());
       // Note that a property call requires the receiver to be wrapped into
       // an object for sloppy callees. Since the receiver is not the target of
       // the load, it could very well be null or undefined at this point.
@@ -2424,10 +2443,10 @@
       Node* key = environment()->Pop();
       Node* home = environment()->Pop();
       Node* object = environment()->Pop();
-      FrameStateBeforeAndAfter states(this, property->key()->id());
+      PrepareEagerCheckpoint(property->key()->id());
       callee_value = BuildKeyedSuperLoad(object, home, key, VectorSlotPair());
-      states.AddToNode(callee_value, property->LoadId(),
-                       OutputFrameStateCombine::Push());
+      PrepareFrameState(callee_value, property->LoadId(),
+                        OutputFrameStateCombine::Push());
       // Note that a property call requires the receiver to be wrapped into
       // an object for sloppy callees. Since the receiver is not the target of
       // the load, it could very well be null or undefined at this point.
@@ -2500,10 +2519,10 @@
   VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
   const Operator* call = javascript()->CallFunction(
       args->length() + 2, feedback, receiver_hint, expr->tail_call_mode());
-  FrameStateBeforeAndAfter states(this, expr->CallId());
+  PrepareEagerCheckpoint(expr->CallId());
   Node* value = ProcessArguments(call, args->length() + 2);
   environment()->Push(value->InputAt(0));  // The callee passed to the call.
-  states.AddToNode(value, expr->ReturnId(), OutputFrameStateCombine::Push());
+  PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
   environment()->Drop(1);
   ast_context()->ProduceValue(value);
 }
@@ -2531,9 +2550,9 @@
   // Create node to perform the super call.
   const Operator* call =
       javascript()->CallConstruct(args->length() + 2, VectorSlotPair());
-  FrameStateBeforeAndAfter states(this, super->new_target_var()->id());
+  PrepareEagerCheckpoint(super->new_target_var()->id());
   Node* value = ProcessArguments(call, args->length() + 2);
-  states.AddToNode(value, expr->ReturnId(), OutputFrameStateCombine::Push());
+  PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
   ast_context()->ProduceValue(value);
 }
 
@@ -2547,8 +2566,8 @@
 
   // The baseline compiler doesn't push the new.target, so we need to record
   // the frame state before the push.
-  FrameStateBeforeAndAfter states(
-      this, args->is_empty() ? expr->expression()->id() : args->last()->id());
+  PrepareEagerCheckpoint(args->is_empty() ? expr->expression()->id()
+                                          : args->last()->id());
 
   // The new target is the same as the callee.
   environment()->Push(environment()->Peek(args->length()));
@@ -2558,7 +2577,7 @@
   const Operator* call =
       javascript()->CallConstruct(args->length() + 2, feedback);
   Node* value = ProcessArguments(call, args->length() + 2);
-  states.AddToNode(value, expr->ReturnId(), OutputFrameStateCombine::Push());
+  PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
   ast_context()->ProduceValue(value);
 }
 
@@ -2578,9 +2597,9 @@
 
   // Create node to perform the JS runtime call.
   const Operator* call = javascript()->CallFunction(args->length() + 2);
-  FrameStateBeforeAndAfter states(this, expr->CallId());
+  PrepareEagerCheckpoint(expr->CallId());
   Node* value = ProcessArguments(call, args->length() + 2);
-  states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+  PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
   ast_context()->ProduceValue(value);
 }
 
@@ -2599,9 +2618,9 @@
   // Create node to perform the runtime call.
   Runtime::FunctionId functionId = expr->function()->function_id;
   const Operator* call = javascript()->CallRuntime(functionId, args->length());
-  FrameStateBeforeAndAfter states(this, expr->CallId());
+  PrepareEagerCheckpoint(expr->CallId());
   Node* value = ProcessArguments(call, args->length());
-  states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+  PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
   ast_context()->ProduceValue(value);
 }
 
@@ -2642,52 +2661,51 @@
     case VARIABLE: {
       VariableProxy* proxy = expr->expression()->AsVariableProxy();
       VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
-      FrameStateBeforeAndAfter states(this, BeforeId(proxy));
-      old_value =
-          BuildVariableLoad(proxy->var(), expr->expression()->id(), states,
-                            pair, OutputFrameStateCombine::Push());
+      PrepareEagerCheckpoint(BeforeId(proxy));
+      old_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
+                                    pair, OutputFrameStateCombine::Push());
       stack_depth = 0;
       break;
     }
     case NAMED_PROPERTY: {
       VisitForValue(property->obj());
-      FrameStateBeforeAndAfter states(this, property->obj()->id());
+      PrepareEagerCheckpoint(property->obj()->id());
       Node* object = environment()->Top();
       Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
       VectorSlotPair pair =
           CreateVectorSlotPair(property->PropertyFeedbackSlot());
       old_value = BuildNamedLoad(object, name, pair);
-      states.AddToNode(old_value, property->LoadId(),
-                       OutputFrameStateCombine::Push());
+      PrepareFrameState(old_value, property->LoadId(),
+                        OutputFrameStateCombine::Push());
       stack_depth = 1;
       break;
     }
     case KEYED_PROPERTY: {
       VisitForValue(property->obj());
       VisitForValue(property->key());
-      FrameStateBeforeAndAfter states(this, property->key()->id());
+      PrepareEagerCheckpoint(property->key()->id());
       Node* key = environment()->Top();
       Node* object = environment()->Peek(1);
       VectorSlotPair pair =
           CreateVectorSlotPair(property->PropertyFeedbackSlot());
       old_value = BuildKeyedLoad(object, key, pair);
-      states.AddToNode(old_value, property->LoadId(),
-                       OutputFrameStateCombine::Push());
+      PrepareFrameState(old_value, property->LoadId(),
+                        OutputFrameStateCombine::Push());
       stack_depth = 2;
       break;
     }
     case NAMED_SUPER_PROPERTY: {
       VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
       VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
-      FrameStateBeforeAndAfter states(this, property->obj()->id());
+      PrepareEagerCheckpoint(property->obj()->id());
       Node* home_object = environment()->Top();
       Node* receiver = environment()->Peek(1);
       Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
       VectorSlotPair pair =
           CreateVectorSlotPair(property->PropertyFeedbackSlot());
       old_value = BuildNamedSuperLoad(receiver, home_object, name, pair);
-      states.AddToNode(old_value, property->LoadId(),
-                       OutputFrameStateCombine::Push());
+      PrepareFrameState(old_value, property->LoadId(),
+                        OutputFrameStateCombine::Push());
       stack_depth = 2;
       break;
     }
@@ -2695,15 +2713,15 @@
       VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
       VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
       VisitForValue(property->key());
-      FrameStateBeforeAndAfter states(this, property->obj()->id());
+      PrepareEagerCheckpoint(property->obj()->id());
       Node* key = environment()->Top();
       Node* home_object = environment()->Peek(1);
       Node* receiver = environment()->Peek(2);
       VectorSlotPair pair =
           CreateVectorSlotPair(property->PropertyFeedbackSlot());
       old_value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
-      states.AddToNode(old_value, property->LoadId(),
-                       OutputFrameStateCombine::Push());
+      PrepareFrameState(old_value, property->LoadId(),
+                        OutputFrameStateCombine::Push());
       stack_depth = 3;
       break;
     }
@@ -2716,7 +2734,7 @@
 
   // Create a proper eager frame state for the stores.
   environment()->Push(old_value);
-  FrameStateBeforeAndAfter store_states(this, expr->ToNumberId());
+  FrameStateBeforeAndAfter binop_states(this, expr->ToNumberId());
   old_value = environment()->Pop();
 
   // Save result for postfix expressions at correct stack depth.
@@ -2729,16 +2747,12 @@
   }
 
   // Create node to perform +1/-1 operation.
-  Node* value;
-  {
-    // TODO(bmeurer): Cleanup this feedback/bailout mess!
-    FrameStateBeforeAndAfter states(this, BailoutId::None());
-    value = BuildBinaryOp(old_value, jsgraph()->OneConstant(),
-                          expr->binary_op(), expr->CountBinOpFeedbackId());
-    // This should never deoptimize because we have converted to number before.
-    states.AddToNode(value, BailoutId::None(),
-                     OutputFrameStateCombine::Ignore());
-  }
+  // TODO(bmeurer): Cleanup this feedback/bailout mess!
+  Node* value = BuildBinaryOp(old_value, jsgraph()->OneConstant(),
+                              expr->binary_op(), expr->CountBinOpFeedbackId());
+  // This should never deoptimize because we have converted to number before.
+  binop_states.AddToNode(value, BailoutId::None(),
+                         OutputFrameStateCombine::Ignore());
 
   // Store the value.
   VectorSlotPair feedback = CreateVectorSlotPair(expr->CountSlot());
@@ -2747,7 +2761,7 @@
       Variable* variable = expr->expression()->AsVariableProxy()->var();
       environment()->Push(value);
       BuildVariableAssignment(variable, value, expr->op(), feedback,
-                              expr->AssignmentId(), store_states);
+                              expr->AssignmentId());
       environment()->Pop();
       break;
     }
@@ -2756,8 +2770,8 @@
       Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
       Node* store = BuildNamedStore(object, name, value, feedback);
       environment()->Push(value);
-      store_states.AddToNode(store, expr->AssignmentId(),
-                             OutputFrameStateCombine::Ignore());
+      PrepareFrameState(store, expr->AssignmentId(),
+                        OutputFrameStateCombine::Ignore());
       environment()->Pop();
       break;
     }
@@ -2766,8 +2780,8 @@
       Node* object = environment()->Pop();
       Node* store = BuildKeyedStore(object, key, value, feedback);
       environment()->Push(value);
-      store_states.AddToNode(store, expr->AssignmentId(),
-                             OutputFrameStateCombine::Ignore());
+      PrepareFrameState(store, expr->AssignmentId(),
+                        OutputFrameStateCombine::Ignore());
       environment()->Pop();
       break;
     }
@@ -2777,8 +2791,8 @@
       Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
       Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
       environment()->Push(value);
-      store_states.AddToNode(store, expr->AssignmentId(),
-                             OutputFrameStateCombine::Ignore());
+      PrepareFrameState(store, expr->AssignmentId(),
+                        OutputFrameStateCombine::Ignore());
       environment()->Pop();
       break;
     }
@@ -2788,8 +2802,8 @@
       Node* receiver = environment()->Pop();
       Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
       environment()->Push(value);
-      store_states.AddToNode(store, expr->AssignmentId(),
-                             OutputFrameStateCombine::Ignore());
+      PrepareFrameState(store, expr->AssignmentId(),
+                        OutputFrameStateCombine::Ignore());
       environment()->Pop();
       break;
     }
@@ -2829,19 +2843,19 @@
   const Operator* op = nullptr;
   switch (expr->op()) {
     case Token::EQ:
-      op = javascript()->Equal();
+      op = javascript()->Equal(CompareOperationHints::Any());
       break;
     case Token::EQ_STRICT:
-      op = javascript()->StrictEqual();
+      op = javascript()->StrictEqual(CompareOperationHints::Any());
       break;
     default:
       UNREACHABLE();
   }
   VisitForValue(sub_expr);
-  FrameStateBeforeAndAfter states(this, sub_expr->id());
+  PrepareEagerCheckpoint(sub_expr->id());
   Node* value_to_compare = environment()->Pop();
   Node* value = NewNode(op, value_to_compare, nil_value);
-  states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+  PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
   return ast_context()->ProduceValue(value);
 }
 
@@ -2849,11 +2863,11 @@
                                                 Expression* sub_expr,
                                                 Handle<String> check) {
   VisitTypeofExpression(sub_expr);
-  FrameStateBeforeAndAfter states(this, sub_expr->id());
+  PrepareEagerCheckpoint(sub_expr->id());
   Node* typeof_arg = NewNode(javascript()->TypeOf(), environment()->Pop());
-  Node* value = NewNode(javascript()->StrictEqual(), typeof_arg,
-                        jsgraph()->Constant(check));
-  states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+  Node* value = NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+                        typeof_arg, jsgraph()->Constant(check));
+  PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
   return ast_context()->ProduceValue(value);
 }
 
@@ -2874,31 +2888,38 @@
     return VisitLiteralCompareNil(expr, sub_expr, jsgraph()->NullConstant());
   }
 
+  CompareOperationHints hints;
+  if (!type_hint_analysis_ ||
+      !type_hint_analysis_->GetCompareOperationHints(
+          expr->CompareOperationFeedbackId(), &hints)) {
+    hints = CompareOperationHints::Any();
+  }
+
   const Operator* op;
   switch (expr->op()) {
     case Token::EQ:
-      op = javascript()->Equal();
+      op = javascript()->Equal(hints);
       break;
     case Token::NE:
-      op = javascript()->NotEqual();
+      op = javascript()->NotEqual(hints);
       break;
     case Token::EQ_STRICT:
-      op = javascript()->StrictEqual();
+      op = javascript()->StrictEqual(hints);
       break;
     case Token::NE_STRICT:
-      op = javascript()->StrictNotEqual();
+      op = javascript()->StrictNotEqual(hints);
       break;
     case Token::LT:
-      op = javascript()->LessThan();
+      op = javascript()->LessThan(hints);
       break;
     case Token::GT:
-      op = javascript()->GreaterThan();
+      op = javascript()->GreaterThan(hints);
       break;
     case Token::LTE:
-      op = javascript()->LessThanOrEqual();
+      op = javascript()->LessThanOrEqual(hints);
       break;
     case Token::GTE:
-      op = javascript()->GreaterThanOrEqual();
+      op = javascript()->GreaterThanOrEqual(hints);
       break;
     case Token::INSTANCEOF:
       op = javascript()->InstanceOf();
@@ -3039,9 +3060,9 @@
     // perform a non-contextual load in case the operand is a variable proxy.
     VariableProxy* proxy = expr->AsVariableProxy();
     VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
-    FrameStateBeforeAndAfter states(this, BeforeId(proxy));
+    PrepareEagerCheckpoint(BeforeId(proxy));
     Node* load =
-        BuildVariableLoad(proxy->var(), expr->id(), states, pair,
+        BuildVariableLoad(proxy->var(), expr->id(), pair,
                           OutputFrameStateCombine::Push(), INSIDE_TYPEOF);
     environment()->Push(load);
   } else {
@@ -3109,7 +3130,7 @@
 
 VectorSlotPair AstGraphBuilder::CreateVectorSlotPair(
     FeedbackVectorSlot slot) const {
-  return VectorSlotPair(handle(info()->shared_info()->feedback_vector()), slot);
+  return VectorSlotPair(handle(info()->closure()->feedback_vector()), slot);
 }
 
 
@@ -3260,9 +3281,8 @@
   // Assign the object to the {arguments} variable. This should never lazy
   // deopt, so it is fine to send invalid bailout id.
   DCHECK(arguments->IsContextSlot() || arguments->IsStackAllocated());
-  FrameStateBeforeAndAfter states(this, BailoutId::None());
   BuildVariableAssignment(arguments, object, Token::ASSIGN, VectorSlotPair(),
-                          BailoutId::None(), states);
+                          BailoutId::None());
   return object;
 }
 
@@ -3279,9 +3299,8 @@
   // Assign the object to the {rest} variable. This should never lazy
   // deopt, so it is fine to send invalid bailout id.
   DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
-  FrameStateBeforeAndAfter states(this, BailoutId::None());
   BuildVariableAssignment(rest, object, Token::ASSIGN, VectorSlotPair(),
-                          BailoutId::None(), states);
+                          BailoutId::None());
   return object;
 }
 
@@ -3294,9 +3313,8 @@
 
   // Assign the object to the {.this_function} variable. This should never lazy
   // deopt, so it is fine to send invalid bailout id.
-  FrameStateBeforeAndAfter states(this, BailoutId::None());
   BuildVariableAssignment(this_function_var, this_function, Token::INIT,
-                          VectorSlotPair(), BailoutId::None(), states);
+                          VectorSlotPair(), BailoutId::None());
   return this_function;
 }
 
@@ -3309,9 +3327,8 @@
 
   // Assign the object to the {new.target} variable. This should never lazy
   // deopt, so it is fine to send invalid bailout id.
-  FrameStateBeforeAndAfter states(this, BailoutId::None());
   BuildVariableAssignment(new_target_var, object, Token::INIT, VectorSlotPair(),
-                          BailoutId::None(), states);
+                          BailoutId::None());
   return object;
 }
 
@@ -3321,7 +3338,8 @@
                                                BailoutId bailout_id) {
   IfBuilder hole_check(this);
   Node* the_hole = jsgraph()->TheHoleConstant();
-  Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
+  Node* check = NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+                        value, the_hole);
   hole_check.If(check);
   hole_check.Then();
   Node* error = BuildThrowReferenceError(variable, bailout_id);
@@ -3338,7 +3356,8 @@
                                                BailoutId bailout_id) {
   IfBuilder hole_check(this);
   Node* the_hole = jsgraph()->TheHoleConstant();
-  Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
+  Node* check = NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+                        value, the_hole);
   hole_check.If(check);
   hole_check.Then();
   environment()->Push(for_hole);
@@ -3355,7 +3374,8 @@
   IfBuilder prototype_check(this);
   Node* prototype_string =
       jsgraph()->Constant(isolate()->factory()->prototype_string());
-  Node* check = NewNode(javascript()->StrictEqual(), name, prototype_string);
+  Node* check = NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+                        name, prototype_string);
   prototype_check.If(check);
   prototype_check.Then();
   Node* error = BuildThrowStaticPrototypeError(bailout_id);
@@ -3369,7 +3389,6 @@
 
 Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
                                          BailoutId bailout_id,
-                                         FrameStateBeforeAndAfter& states,
                                          const VectorSlotPair& feedback,
                                          OutputFrameStateCombine combine,
                                          TypeofMode typeof_mode) {
@@ -3382,7 +3401,7 @@
       Handle<Name> name = variable->name();
       if (Node* node = TryLoadGlobalConstant(name)) return node;
       Node* value = BuildGlobalLoad(name, feedback, typeof_mode);
-      states.AddToNode(value, bailout_id, combine);
+      PrepareFrameState(value, bailout_id, combine);
       return value;
     }
     case VariableLocation::PARAMETER:
@@ -3418,13 +3437,12 @@
     case VariableLocation::LOOKUP: {
       // Dynamic lookup of context variable (anywhere in the chain).
       Handle<String> name = variable->name();
-      if (Node* node =
-              TryLoadDynamicVariable(variable, name, bailout_id, states,
-                                     feedback, combine, typeof_mode)) {
+      if (Node* node = TryLoadDynamicVariable(variable, name, bailout_id,
+                                              feedback, combine, typeof_mode)) {
         return node;
       }
       Node* value = BuildDynamicLoad(name, typeof_mode);
-      states.AddToNode(value, bailout_id, combine);
+      PrepareFrameState(value, bailout_id, combine);
       return value;
     }
   }
@@ -3467,11 +3485,10 @@
   return nullptr;
 }
 
-
 Node* AstGraphBuilder::BuildVariableAssignment(
     Variable* variable, Node* value, Token::Value op,
     const VectorSlotPair& feedback, BailoutId bailout_id,
-    FrameStateBeforeAndAfter& states, OutputFrameStateCombine combine) {
+    OutputFrameStateCombine combine) {
   Node* the_hole = jsgraph()->TheHoleConstant();
   VariableMode mode = variable->mode();
   switch (variable->location()) {
@@ -3480,7 +3497,7 @@
       // Global var, const, or let variable.
       Handle<Name> name = variable->name();
       Node* store = BuildGlobalStore(name, value, feedback);
-      states.AddToNode(store, bailout_id, combine);
+      PrepareFrameState(store, bailout_id, combine);
       return store;
     }
     case VariableLocation::PARAMETER:
@@ -3740,11 +3757,11 @@
   Expression* expr = property->value();
   if (!FunctionLiteral::NeedsHomeObject(expr)) return value;
   Handle<Name> name = isolate()->factory()->home_object_symbol();
-  FrameStateBeforeAndAfter states(this, BailoutId::None());
   VectorSlotPair feedback =
       CreateVectorSlotPair(property->GetSlot(slot_number));
   Node* store = BuildNamedStore(value, name, home_object, feedback);
-  states.AddToNode(store, BailoutId::None(), OutputFrameStateCombine::Ignore());
+  PrepareFrameState(store, BailoutId::None(),
+                    OutputFrameStateCombine::Ignore());
   return store;
 }
 
@@ -3881,11 +3898,12 @@
   return nullptr;
 }
 
-
-Node* AstGraphBuilder::TryLoadDynamicVariable(
-    Variable* variable, Handle<String> name, BailoutId bailout_id,
-    FrameStateBeforeAndAfter& states, const VectorSlotPair& feedback,
-    OutputFrameStateCombine combine, TypeofMode typeof_mode) {
+Node* AstGraphBuilder::TryLoadDynamicVariable(Variable* variable,
+                                              Handle<String> name,
+                                              BailoutId bailout_id,
+                                              const VectorSlotPair& feedback,
+                                              OutputFrameStateCombine combine,
+                                              TypeofMode typeof_mode) {
   VariableMode mode = variable->mode();
 
   if (mode == DYNAMIC_GLOBAL) {
@@ -3907,8 +3925,9 @@
       Node* load = NewNode(
           javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
           current_context());
-      Node* check = NewNode(javascript()->StrictEqual(), load,
-                            jsgraph()->TheHoleConstant());
+      Node* check =
+          NewNode(javascript()->StrictEqual(CompareOperationHints::Any()), load,
+                  jsgraph()->TheHoleConstant());
       fast_block.BreakUnless(check, BranchHint::kTrue);
     }
 
@@ -3918,7 +3937,7 @@
     } else {
       // Perform global slot load.
       Node* fast = BuildGlobalLoad(name, feedback, typeof_mode);
-      states.AddToNode(fast, bailout_id, combine);
+      PrepareFrameState(fast, bailout_id, combine);
       environment()->Push(fast);
     }
     slow_block.Break();
@@ -3927,7 +3946,7 @@
 
     // Slow case, because variable potentially shadowed. Perform dynamic lookup.
     Node* slow = BuildDynamicLoad(name, typeof_mode);
-    states.AddToNode(slow, bailout_id, combine);
+    PrepareFrameState(slow, bailout_id, combine);
     environment()->Push(slow);
     slow_block.EndBlock();
 
@@ -3953,16 +3972,17 @@
       Node* load = NewNode(
           javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
           current_context());
-      Node* check = NewNode(javascript()->StrictEqual(), load,
-                            jsgraph()->TheHoleConstant());
+      Node* check =
+          NewNode(javascript()->StrictEqual(CompareOperationHints::Any()), load,
+                  jsgraph()->TheHoleConstant());
       fast_block.BreakUnless(check, BranchHint::kTrue);
     }
 
     // Fast case, because variable is not shadowed. Perform context slot load.
     Variable* local = variable->local_if_not_shadowed();
     DCHECK(local->location() == VariableLocation::CONTEXT);  // Must be context.
-    Node* fast = BuildVariableLoad(local, bailout_id, states, feedback, combine,
-                                   typeof_mode);
+    Node* fast =
+        BuildVariableLoad(local, bailout_id, feedback, combine, typeof_mode);
     environment()->Push(fast);
     slow_block.Break();
     environment()->Pop();
@@ -3970,7 +3990,7 @@
 
     // Slow case, because variable potentially shadowed. Perform dynamic lookup.
     Node* slow = BuildDynamicLoad(name, typeof_mode);
-    states.AddToNode(slow, bailout_id, combine);
+    PrepareFrameState(slow, bailout_id, combine);
     environment()->Push(slow);
     slow_block.EndBlock();
 
@@ -4053,6 +4073,20 @@
   }
 }
 
+void AstGraphBuilder::PrepareEagerCheckpoint(BailoutId ast_id) {
+  if (environment()->GetEffectDependency()->opcode() == IrOpcode::kCheckpoint) {
+    // We skip preparing a checkpoint if there already is one on the current
+    // effect dependency. This is an optimization, not needed for correctness.
+    return;
+  }
+  if (ast_id != BailoutId::None()) {
+    Node* node = NewNode(common()->Checkpoint());
+    DCHECK_EQ(IrOpcode::kDead,
+              NodeProperties::GetFrameStateInput(node, 0)->opcode());
+    NodeProperties::ReplaceFrameStateInput(node, 0,
+                                           environment()->Checkpoint(ast_id));
+  }
+}
 
 BitVector* AstGraphBuilder::GetVariablesAssignedInLoop(
     IterationStatement* stmt) {
@@ -4298,7 +4332,6 @@
 }
 
 
-// TODO(mstarzinger): Revisit this once we have proper effect states.
 Node* AstGraphBuilder::NewEffectPhi(int count, Node* input, Node* control) {
   const Operator* phi_op = common()->EffectPhi(count);
   Node** buffer = EnsureInputBufferSize(count + 1);
diff --git a/src/compiler/ast-graph-builder.h b/src/compiler/ast-graph-builder.h
index 1d0fc90..8346a51 100644
--- a/src/compiler/ast-graph-builder.h
+++ b/src/compiler/ast-graph-builder.h
@@ -106,6 +106,9 @@
   // Optimization to cache loaded feedback vector.
   SetOncePointer<Node> feedback_vector_;
 
+  // Optimization to cache empty frame state.
+  SetOncePointer<Node> empty_frame_state_;
+
   // Control nodes that exit the function body.
   ZoneVector<Node*> exit_controls_;
 
@@ -167,6 +170,9 @@
   // Get or create the node that represents the incoming new target value.
   Node* GetNewTarget();
 
+  // Get or create the node that represents the empty frame state.
+  Node* GetEmptyFrameState();
+
   // Node creation helpers.
   Node* NewNode(const Operator* op, bool incomplete = false) {
     return MakeNode(op, 0, static_cast<Node**>(nullptr), incomplete);
@@ -225,11 +231,18 @@
   // Helper to indicate a node exits the function body.
   void UpdateControlDependencyToLeaveFunction(Node* exit);
 
-  // Builds deoptimization for a given node.
+  // Prepare information for lazy deoptimization. This information is attached
+  // to the given node and the output value produced by the node is combined.
+  // Conceptually this frame state is "after" a given operation.
   void PrepareFrameState(Node* node, BailoutId ast_id,
                          OutputFrameStateCombine framestate_combine =
                              OutputFrameStateCombine::Ignore());
 
+  // Prepare information for eager deoptimization. This information is carried
+  // by dedicated {Checkpoint} nodes that are wired into the effect chain.
+  // Conceptually this frame state is "before" a given operation.
+  void PrepareEagerCheckpoint(BailoutId ast_id);
+
   BitVector* GetVariablesAssignedInLoop(IterationStatement* stmt);
 
   // Check if the given statement is an OSR entry.
@@ -277,13 +290,11 @@
   Node* BuildVariableAssignment(Variable* variable, Node* value,
                                 Token::Value op, const VectorSlotPair& slot,
                                 BailoutId bailout_id,
-                                FrameStateBeforeAndAfter& states,
                                 OutputFrameStateCombine framestate_combine =
                                     OutputFrameStateCombine::Ignore());
   Node* BuildVariableDelete(Variable* variable, BailoutId bailout_id,
                             OutputFrameStateCombine framestate_combine);
   Node* BuildVariableLoad(Variable* variable, BailoutId bailout_id,
-                          FrameStateBeforeAndAfter& states,
                           const VectorSlotPair& feedback,
                           OutputFrameStateCombine framestate_combine,
                           TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
@@ -374,7 +385,6 @@
   // to resolve to a global slot or context slot (inferred from scope chain).
   Node* TryLoadDynamicVariable(Variable* variable, Handle<String> name,
                                BailoutId bailout_id,
-                               FrameStateBeforeAndAfter& states,
                                const VectorSlotPair& feedback,
                                OutputFrameStateCombine combine,
                                TypeofMode typeof_mode);
diff --git a/src/compiler/branch-elimination.cc b/src/compiler/branch-elimination.cc
index 427612c..236fbca 100644
--- a/src/compiler/branch-elimination.cc
+++ b/src/compiler/branch-elimination.cc
@@ -99,17 +99,17 @@
   if (condition_value.IsJust()) {
     // If we know the condition we can discard the branch.
     if (condition_is_true == condition_value.FromJust()) {
-      // We don't to update the conditions here, because we're replacing with
-      // the {control} node that already contains the right information.
-      return Replace(control);
+      // We don't update the conditions here, because we're replacing {node}
+      // with the {control} node that already contains the right information.
+      ReplaceWithValue(node, dead(), effect, control);
     } else {
       control = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
                                  frame_state, effect, control);
       // TODO(bmeurer): This should be on the AdvancedReducer somehow.
       NodeProperties::MergeControlToEnd(graph(), common(), control);
       Revisit(graph()->end());
-      return Replace(dead());
     }
+    return Replace(dead());
   }
   return UpdateConditions(
       node, conditions->AddCondition(zone_, condition, condition_is_true));
diff --git a/src/compiler/bytecode-graph-builder.cc b/src/compiler/bytecode-graph-builder.cc
index 22299de..79d8ff2 100644
--- a/src/compiler/bytecode-graph-builder.cc
+++ b/src/compiler/bytecode-graph-builder.cc
@@ -109,6 +109,11 @@
         id_before, OutputFrameStateCombine::Ignore());
     id_after_ = BailoutId(id_before.ToInt() +
                           builder->bytecode_iterator().current_bytecode_size());
+    // Create an explicit checkpoint node for before the operation.
+    Node* node = builder_->NewNode(builder_->common()->Checkpoint());
+    DCHECK_EQ(IrOpcode::kDead,
+              NodeProperties::GetFrameStateInput(node, 0)->opcode());
+    NodeProperties::ReplaceFrameStateInput(node, 0, frame_state_before_);
   }
 
   ~FrameStateBeforeAndAfter() {
@@ -136,6 +141,7 @@
 
     if (count >= 2) {
       // Add the frame state for before the operation.
+      // TODO(mstarzinger): Get rid of frame state input before!
       DCHECK_EQ(IrOpcode::kDead,
                 NodeProperties::GetFrameStateInput(node, 1)->opcode());
       NodeProperties::ReplaceFrameStateInput(node, 1, frame_state_before_);
@@ -355,9 +361,6 @@
 
 bool BytecodeGraphBuilder::Environment::StateValuesRequireUpdate(
     Node** state_values, int offset, int count) {
-  if (!builder()->deoptimization_enabled_) {
-    return false;
-  }
   if (*state_values == nullptr) {
     return true;
   }
@@ -385,10 +388,6 @@
 
 Node* BytecodeGraphBuilder::Environment::Checkpoint(
     BailoutId bailout_id, OutputFrameStateCombine combine) {
-  if (!builder()->deoptimization_enabled_) {
-    return builder()->jsgraph()->EmptyFrameState();
-  }
-
   // TODO(rmcilroy): Consider using StateValuesCache for some state values.
   UpdateStateValues(&parameters_state_values_, 0, parameter_count());
   UpdateStateValues(&registers_state_values_, register_base(),
@@ -423,7 +422,6 @@
 
 bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
     int output_poke_offset, int output_poke_count) {
-  if (!builder()->deoptimization_enabled_) return true;
   // Poke offset is relative to the top of the stack (i.e., the accumulator).
   int output_poke_start = accumulator_base() - output_poke_offset;
   int output_poke_end = output_poke_start + output_poke_count;
@@ -444,12 +442,11 @@
       bytecode_array_(handle(info->shared_info()->bytecode_array())),
       exception_handler_table_(
           handle(HandlerTable::cast(bytecode_array()->handler_table()))),
-      feedback_vector_(handle(info->shared_info()->feedback_vector())),
+      feedback_vector_(handle(info->closure()->feedback_vector())),
       frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
           FrameStateType::kInterpretedFunction,
           bytecode_array()->parameter_count(),
           bytecode_array()->register_count(), info->shared_info())),
-      deoptimization_enabled_(info->is_deoptimization_enabled()),
       merge_environments_(local_zone),
       exception_handlers_(local_zone),
       current_exception_handler_(0),
@@ -586,6 +583,11 @@
   environment()->BindAccumulator(node);
 }
 
+void BytecodeGraphBuilder::VisitLdrUndefined() {
+  Node* node = jsgraph()->UndefinedConstant();
+  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), node);
+}
+
 void BytecodeGraphBuilder::VisitLdaNull() {
   Node* node = jsgraph()->NullConstant();
   environment()->BindAccumulator(node);
@@ -623,25 +625,33 @@
   environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), value);
 }
 
-void BytecodeGraphBuilder::BuildLoadGlobal(
-    TypeofMode typeof_mode) {
-  FrameStateBeforeAndAfter states(this);
-  Handle<Name> name =
-      Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+Node* BytecodeGraphBuilder::BuildLoadGlobal(TypeofMode typeof_mode) {
   VectorSlotPair feedback =
-      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
-
+      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(0));
+  DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
+            feedback_vector()->GetKind(feedback.slot()));
+  Handle<Name> name(feedback_vector()->GetName(feedback.slot()));
   const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
-  Node* node = NewNode(op, GetFunctionClosure());
-  environment()->BindAccumulator(node, &states);
+  return NewNode(op, GetFunctionClosure());
 }
 
 void BytecodeGraphBuilder::VisitLdaGlobal() {
-  BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
+  FrameStateBeforeAndAfter states(this);
+  Node* node = BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
+  environment()->BindAccumulator(node, &states);
+}
+
+void BytecodeGraphBuilder::VisitLdrGlobal() {
+  FrameStateBeforeAndAfter states(this);
+  Node* node = BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
+  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), node,
+                              &states);
 }
 
 void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
-  BuildLoadGlobal(TypeofMode::INSIDE_TYPEOF);
+  FrameStateBeforeAndAfter states(this);
+  Node* node = BuildLoadGlobal(TypeofMode::INSIDE_TYPEOF);
+  environment()->BindAccumulator(node, &states);
 }
 
 void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
@@ -665,7 +675,7 @@
   BuildStoreGlobal(LanguageMode::STRICT);
 }
 
-void BytecodeGraphBuilder::VisitLdaContextSlot() {
+Node* BytecodeGraphBuilder::BuildLoadContextSlot() {
   // TODO(mythria): LoadContextSlots are unrolled by the required depth when
   // generating bytecode. Hence the value of depth is always 0. Update this
   // code, when the implementation changes.
@@ -676,10 +686,19 @@
       0, bytecode_iterator().GetIndexOperand(1), false);
   Node* context =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
-  Node* node = NewNode(op, context);
+  return NewNode(op, context);
+}
+
+void BytecodeGraphBuilder::VisitLdaContextSlot() {
+  Node* node = BuildLoadContextSlot();
   environment()->BindAccumulator(node);
 }
 
+void BytecodeGraphBuilder::VisitLdrContextSlot() {
+  Node* node = BuildLoadContextSlot();
+  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(2), node);
+}
+
 void BytecodeGraphBuilder::VisitStaContextSlot() {
   // TODO(mythria): LoadContextSlots are unrolled by the required depth when
   // generating bytecode. Hence the value of depth is always 0. Update this
@@ -732,8 +751,7 @@
   BuildStaLookupSlot(LanguageMode::STRICT);
 }
 
-void BytecodeGraphBuilder::BuildNamedLoad() {
-  FrameStateBeforeAndAfter states(this);
+Node* BytecodeGraphBuilder::BuildNamedLoad() {
   Node* object =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Handle<Name> name =
@@ -742,14 +760,23 @@
       CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
 
   const Operator* op = javascript()->LoadNamed(name, feedback);
-  Node* node = NewNode(op, object, GetFunctionClosure());
+  return NewNode(op, object, GetFunctionClosure());
+}
+
+void BytecodeGraphBuilder::VisitLdaNamedProperty() {
+  FrameStateBeforeAndAfter states(this);
+  Node* node = BuildNamedLoad();
   environment()->BindAccumulator(node, &states);
 }
 
-void BytecodeGraphBuilder::VisitLoadIC() { BuildNamedLoad(); }
-
-void BytecodeGraphBuilder::BuildKeyedLoad() {
+void BytecodeGraphBuilder::VisitLdrNamedProperty() {
   FrameStateBeforeAndAfter states(this);
+  Node* node = BuildNamedLoad();
+  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(3), node,
+                              &states);
+}
+
+Node* BytecodeGraphBuilder::BuildKeyedLoad() {
   Node* key = environment()->LookupAccumulator();
   Node* object =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
@@ -757,11 +784,21 @@
       CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
 
   const Operator* op = javascript()->LoadProperty(feedback);
-  Node* node = NewNode(op, object, key, GetFunctionClosure());
+  return NewNode(op, object, key, GetFunctionClosure());
+}
+
+void BytecodeGraphBuilder::VisitLdaKeyedProperty() {
+  FrameStateBeforeAndAfter states(this);
+  Node* node = BuildKeyedLoad();
   environment()->BindAccumulator(node, &states);
 }
 
-void BytecodeGraphBuilder::VisitKeyedLoadIC() { BuildKeyedLoad(); }
+void BytecodeGraphBuilder::VisitLdrKeyedProperty() {
+  FrameStateBeforeAndAfter states(this);
+  Node* node = BuildKeyedLoad();
+  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(2), node,
+                              &states);
+}
 
 void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode) {
   FrameStateBeforeAndAfter states(this);
@@ -778,11 +815,11 @@
   environment()->RecordAfterState(node, &states);
 }
 
-void BytecodeGraphBuilder::VisitStoreICSloppy() {
+void BytecodeGraphBuilder::VisitStaNamedPropertySloppy() {
   BuildNamedStore(LanguageMode::SLOPPY);
 }
 
-void BytecodeGraphBuilder::VisitStoreICStrict() {
+void BytecodeGraphBuilder::VisitStaNamedPropertyStrict() {
   BuildNamedStore(LanguageMode::STRICT);
 }
 
@@ -801,11 +838,11 @@
   environment()->RecordAfterState(node, &states);
 }
 
-void BytecodeGraphBuilder::VisitKeyedStoreICSloppy() {
+void BytecodeGraphBuilder::VisitStaKeyedPropertySloppy() {
   BuildKeyedStore(LanguageMode::SLOPPY);
 }
 
-void BytecodeGraphBuilder::VisitKeyedStoreICStrict() {
+void BytecodeGraphBuilder::VisitStaKeyedPropertyStrict() {
   BuildKeyedStore(LanguageMode::STRICT);
 }
 
@@ -965,8 +1002,7 @@
 
 void BytecodeGraphBuilder::VisitCallRuntime() {
   FrameStateBeforeAndAfter states(this);
-  Runtime::FunctionId functionId = static_cast<Runtime::FunctionId>(
-      bytecode_iterator().GetRuntimeIdOperand(0));
+  Runtime::FunctionId functionId = bytecode_iterator().GetRuntimeIdOperand(0);
   interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
   size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
 
@@ -978,8 +1014,7 @@
 
 void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
   FrameStateBeforeAndAfter states(this);
-  Runtime::FunctionId functionId = static_cast<Runtime::FunctionId>(
-      bytecode_iterator().GetRuntimeIdOperand(0));
+  Runtime::FunctionId functionId = bytecode_iterator().GetRuntimeIdOperand(0);
   interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
   size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
   interpreter::Register first_return =
@@ -993,8 +1028,7 @@
 
 void BytecodeGraphBuilder::VisitInvokeIntrinsic() {
   FrameStateBeforeAndAfter states(this);
-  Runtime::FunctionId functionId = static_cast<Runtime::FunctionId>(
-      bytecode_iterator().GetRuntimeIdOperand(0));
+  Runtime::FunctionId functionId = bytecode_iterator().GetIntrinsicIdOperand(0);
   interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
   size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
 
@@ -1188,31 +1222,38 @@
 }
 
 void BytecodeGraphBuilder::VisitTestEqual() {
-  BuildCompareOp(javascript()->Equal());
+  CompareOperationHints hints = CompareOperationHints::Any();
+  BuildCompareOp(javascript()->Equal(hints));
 }
 
 void BytecodeGraphBuilder::VisitTestNotEqual() {
-  BuildCompareOp(javascript()->NotEqual());
+  CompareOperationHints hints = CompareOperationHints::Any();
+  BuildCompareOp(javascript()->NotEqual(hints));
 }
 
 void BytecodeGraphBuilder::VisitTestEqualStrict() {
-  BuildCompareOp(javascript()->StrictEqual());
+  CompareOperationHints hints = CompareOperationHints::Any();
+  BuildCompareOp(javascript()->StrictEqual(hints));
 }
 
 void BytecodeGraphBuilder::VisitTestLessThan() {
-  BuildCompareOp(javascript()->LessThan());
+  CompareOperationHints hints = CompareOperationHints::Any();
+  BuildCompareOp(javascript()->LessThan(hints));
 }
 
 void BytecodeGraphBuilder::VisitTestGreaterThan() {
-  BuildCompareOp(javascript()->GreaterThan());
+  CompareOperationHints hints = CompareOperationHints::Any();
+  BuildCompareOp(javascript()->GreaterThan(hints));
 }
 
 void BytecodeGraphBuilder::VisitTestLessThanOrEqual() {
-  BuildCompareOp(javascript()->LessThanOrEqual());
+  CompareOperationHints hints = CompareOperationHints::Any();
+  BuildCompareOp(javascript()->LessThanOrEqual(hints));
 }
 
 void BytecodeGraphBuilder::VisitTestGreaterThanOrEqual() {
-  BuildCompareOp(javascript()->GreaterThanOrEqual());
+  CompareOperationHints hints = CompareOperationHints::Any();
+  BuildCompareOp(javascript()->GreaterThanOrEqual(hints));
 }
 
 void BytecodeGraphBuilder::VisitTestIn() {
@@ -1376,16 +1417,26 @@
   Node* state = environment()->LookupAccumulator();
   Node* generator = environment()->LookupRegister(
       bytecode_iterator().GetRegisterOperand(0));
+  // The offsets used by the bytecode iterator are relative to a different base
+  // than what is used in the interpreter, hence the addition.
+  Node* offset =
+      jsgraph()->Constant(bytecode_iterator().current_offset() +
+                          (BytecodeArray::kHeaderSize - kHeapObjectTag));
 
-  for (int i = 0; i < environment()->register_count(); ++i) {
-    Node* value = environment()->LookupRegister(interpreter::Register(i));
-    NewNode(javascript()->CallRuntime(Runtime::kGeneratorStoreRegister),
-        generator, jsgraph()->Constant(i), value);
+  int register_count = environment()->register_count();
+  int value_input_count = 3 + register_count;
+
+  Node** value_inputs = local_zone()->NewArray<Node*>(value_input_count);
+  value_inputs[0] = generator;
+  value_inputs[1] = state;
+  value_inputs[2] = offset;
+  for (int i = 0; i < register_count; ++i) {
+    value_inputs[3 + i] =
+        environment()->LookupRegister(interpreter::Register(i));
   }
 
-  NewNode(javascript()->CallRuntime(Runtime::kGeneratorSetContext), generator);
-  NewNode(javascript()->CallRuntime(Runtime::kGeneratorSetContinuation),
-      generator, state);
+  MakeNode(javascript()->GeneratorStore(register_count), value_input_count,
+           value_inputs, false);
 }
 
 void BytecodeGraphBuilder::VisitResumeGenerator() {
@@ -1393,23 +1444,16 @@
 
   Node* generator = environment()->LookupRegister(
       bytecode_iterator().GetRegisterOperand(0));
-  Node* state = NewNode(javascript()->CallRuntime(
-      Runtime::kGeneratorGetContinuation), generator);
 
   // Bijection between registers and array indices must match that used in
   // InterpreterAssembler::ExportRegisterFile.
   for (int i = 0; i < environment()->register_count(); ++i) {
-    Node* value = NewNode(
-        javascript()->CallRuntime(Runtime::kGeneratorLoadRegister),
-        generator, jsgraph()->Constant(i));
+    Node* value = NewNode(javascript()->GeneratorRestoreRegister(i), generator);
     environment()->BindRegister(interpreter::Register(i), value);
-
-    NewNode(javascript()->CallRuntime(Runtime::kGeneratorStoreRegister),
-        generator, jsgraph()->Constant(i), jsgraph()->StaleRegisterConstant());
   }
 
-  NewNode(javascript()->CallRuntime(Runtime::kGeneratorSetContinuation),
-      generator, jsgraph()->Constant(JSGeneratorObject::kGeneratorExecuting));
+  Node* state =
+      NewNode(javascript()->GeneratorRestoreContinuation(), generator);
 
   environment()->BindAccumulator(state, &states);
 }
@@ -1485,7 +1529,8 @@
 void BytecodeGraphBuilder::BuildJumpIfEqual(Node* comperand) {
   Node* accumulator = environment()->LookupAccumulator();
   Node* condition =
-      NewNode(javascript()->StrictEqual(), accumulator, comperand);
+      NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+              accumulator, comperand);
   BuildConditionalJump(condition);
 }
 
@@ -1494,14 +1539,17 @@
   Node* accumulator = environment()->LookupAccumulator();
   Node* to_boolean =
       NewNode(javascript()->ToBoolean(ToBooleanHint::kAny), accumulator);
-  Node* condition = NewNode(javascript()->StrictEqual(), to_boolean, comperand);
+  Node* condition =
+      NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+              to_boolean, comperand);
   BuildConditionalJump(condition);
 }
 
 void BytecodeGraphBuilder::BuildJumpIfNotHole() {
   Node* accumulator = environment()->LookupAccumulator();
-  Node* condition = NewNode(javascript()->StrictEqual(), accumulator,
-                            jsgraph()->TheHoleConstant());
+  Node* condition =
+      NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+              accumulator, jsgraph()->TheHoleConstant());
   Node* node =
       NewNode(common()->Select(MachineRepresentation::kTagged), condition,
               jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
diff --git a/src/compiler/bytecode-graph-builder.h b/src/compiler/bytecode-graph-builder.h
index c842c24..66cd96e 100644
--- a/src/compiler/bytecode-graph-builder.h
+++ b/src/compiler/bytecode-graph-builder.h
@@ -112,11 +112,12 @@
 
   void BuildCreateLiteral(const Operator* op);
   void BuildCreateArguments(CreateArgumentsType type);
-  void BuildLoadGlobal(TypeofMode typeof_mode);
+  Node* BuildLoadContextSlot();
+  Node* BuildLoadGlobal(TypeofMode typeof_mode);
   void BuildStoreGlobal(LanguageMode language_mode);
-  void BuildNamedLoad();
-  void BuildKeyedLoad();
+  Node* BuildNamedLoad();
   void BuildNamedStore(LanguageMode language_mode);
+  Node* BuildKeyedLoad();
   void BuildKeyedStore(LanguageMode language_mode);
   void BuildLdaLookupSlot(TypeofMode typeof_mode);
   void BuildStaLookupSlot(LanguageMode language_mode);
@@ -218,10 +219,6 @@
   const BytecodeBranchAnalysis* branch_analysis_;
   Environment* environment_;
 
-  // Indicates whether deoptimization support is enabled for this compilation
-  // and whether valid frame states need to be attached to deoptimizing nodes.
-  bool deoptimization_enabled_;
-
   // Merge environments are snapshots of the environment at points where the
   // control flow merges. This models a forward data flow propagation of all
   // values from all predecessors of the merge in question.
diff --git a/src/compiler/checkpoint-elimination.cc b/src/compiler/checkpoint-elimination.cc
new file mode 100644
index 0000000..d81e109
--- /dev/null
+++ b/src/compiler/checkpoint-elimination.cc
@@ -0,0 +1,43 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/checkpoint-elimination.h"
+
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+CheckpointElimination::CheckpointElimination(Editor* editor)
+    : AdvancedReducer(editor) {}
+
+namespace {
+
+// The given checkpoint is redundant if it is effect-wise dominated by another
+// checkpoint and there is no observable write in between. For now we consider
+// a linear effect chain only instead of true effect-wise dominance.
+bool IsRedundantCheckpoint(Node* node) {
+  Node* effect = NodeProperties::GetEffectInput(node);
+  while (effect->op()->HasProperty(Operator::kNoWrite) &&
+         effect->op()->EffectInputCount() == 1) {
+    if (effect->opcode() == IrOpcode::kCheckpoint) return true;
+    effect = NodeProperties::GetEffectInput(effect);
+  }
+  return false;
+}
+
+}  // namespace
+
+Reduction CheckpointElimination::Reduce(Node* node) {
+  if (node->opcode() != IrOpcode::kCheckpoint) return NoChange();
+  if (IsRedundantCheckpoint(node)) {
+    return Replace(NodeProperties::GetEffectInput(node));
+  }
+  return NoChange();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/checkpoint-elimination.h b/src/compiler/checkpoint-elimination.h
new file mode 100644
index 0000000..4d6aada
--- /dev/null
+++ b/src/compiler/checkpoint-elimination.h
@@ -0,0 +1,27 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CHECKPOINT_ELIMINATION_H_
+#define V8_COMPILER_CHECKPOINT_ELIMINATION_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Performs elimination of redundant checkpoints within the graph.
+class CheckpointElimination final : public AdvancedReducer {
+ public:
+  explicit CheckpointElimination(Editor* editor);
+  ~CheckpointElimination() final {}
+
+  Reduction Reduce(Node* node) final;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_CHECKPOINT_ELIMINATION_H_
diff --git a/src/compiler/coalesced-live-ranges.cc b/src/compiler/coalesced-live-ranges.cc
deleted file mode 100644
index 4ac3e21..0000000
--- a/src/compiler/coalesced-live-ranges.cc
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/coalesced-live-ranges.h"
-#include "src/compiler/greedy-allocator.h"
-#include "src/compiler/register-allocator.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-LiveRangeConflictIterator::LiveRangeConflictIterator(const LiveRange* range,
-                                                     IntervalStore* storage)
-    : query_(range->first_interval()),
-      pos_(storage->end()),
-      intervals_(storage) {
-  MovePosAndQueryToFirstConflict();
-}
-
-
-LiveRange* LiveRangeConflictIterator::Current() const {
-  if (IsFinished()) return nullptr;
-  return pos_->range_;
-}
-
-
-void LiveRangeConflictIterator::MovePosToFirstConflictForQuery() {
-  DCHECK_NOT_NULL(query_);
-  auto end = intervals_->end();
-  LifetimePosition q_start = query_->start();
-  LifetimePosition q_end = query_->end();
-
-  if (intervals_->empty() || intervals_->rbegin()->end_ <= q_start ||
-      intervals_->begin()->start_ >= q_end) {
-    pos_ = end;
-    return;
-  }
-
-  pos_ = intervals_->upper_bound(AsAllocatedInterval(q_start));
-  // pos is either at the end (no start strictly greater than q_start) or
-  // at some position with the aforementioned property. In either case, the
-  // allocated interval before this one may intersect our query:
-  // either because, although it starts before this query's start, it ends
-  // after; or because it starts exactly at the query start. So unless we're
-  // right at the beginning of the storage - meaning the first allocated
-  // interval is also starting after this query's start - see what's behind.
-  if (pos_ != intervals_->begin()) {
-    --pos_;
-    if (!QueryIntersectsAllocatedInterval()) {
-      // The interval behind wasn't intersecting, so move back.
-      ++pos_;
-    }
-  }
-  if (pos_ == end || !QueryIntersectsAllocatedInterval()) {
-    pos_ = end;
-  }
-}
-
-
-void LiveRangeConflictIterator::MovePosAndQueryToFirstConflict() {
-  auto end = intervals_->end();
-  for (; query_ != nullptr; query_ = query_->next()) {
-    MovePosToFirstConflictForQuery();
-    if (pos_ != end) {
-      DCHECK(QueryIntersectsAllocatedInterval());
-      return;
-    }
-  }
-
-  Invalidate();
-}
-
-
-void LiveRangeConflictIterator::IncrementPosAndSkipOverRepetitions() {
-  auto end = intervals_->end();
-  DCHECK(pos_ != end);
-  LiveRange* current_conflict = Current();
-  while (pos_ != end && pos_->range_ == current_conflict) {
-    ++pos_;
-  }
-}
-
-
-LiveRange* LiveRangeConflictIterator::InternalGetNext(bool clean_behind) {
-  if (IsFinished()) return nullptr;
-
-  LiveRange* to_clear = Current();
-  IncrementPosAndSkipOverRepetitions();
-  // At this point, pos_ is either at the end, or on an interval that doesn't
-  // correspond to the same range as to_clear. This interval may not even be
-  // a conflict.
-  if (clean_behind) {
-    // Since we parked pos_ on an iterator that won't be affected by removal,
-    // we can safely delete to_clear's intervals.
-    for (auto interval = to_clear->first_interval(); interval != nullptr;
-         interval = interval->next()) {
-      AllocatedInterval erase_key(interval->start(), interval->end(), nullptr);
-      intervals_->erase(erase_key);
-    }
-  }
-  // We may have parked pos_ at the end, or on a non-conflict. In that case,
-  // move to the next query and reinitialize pos and query. This may invalidate
-  // the iterator, if no more conflicts are available.
-  if (!QueryIntersectsAllocatedInterval()) {
-    query_ = query_->next();
-    MovePosAndQueryToFirstConflict();
-  }
-  return Current();
-}
-
-
-LiveRangeConflictIterator CoalescedLiveRanges::GetConflicts(
-    const LiveRange* range) {
-  return LiveRangeConflictIterator(range, &intervals());
-}
-
-
-void CoalescedLiveRanges::AllocateRange(LiveRange* range) {
-  for (auto interval = range->first_interval(); interval != nullptr;
-       interval = interval->next()) {
-    AllocatedInterval to_insert(interval->start(), interval->end(), range);
-    intervals().insert(to_insert);
-  }
-}
-
-
-bool CoalescedLiveRanges::VerifyAllocationsAreValidForTesting() const {
-  LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0);
-  for (auto i : intervals_) {
-    if (i.start_ < last_end) {
-      return false;
-    }
-    last_end = i.end_;
-  }
-  return true;
-}
-
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/coalesced-live-ranges.h b/src/compiler/coalesced-live-ranges.h
deleted file mode 100644
index 54bbce2..0000000
--- a/src/compiler/coalesced-live-ranges.h
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COALESCED_LIVE_RANGES_H_
-#define V8_COALESCED_LIVE_RANGES_H_
-
-#include "src/compiler/register-allocator.h"
-#include "src/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-// Implementation detail for CoalescedLiveRanges.
-struct AllocatedInterval {
-  AllocatedInterval(LifetimePosition start, LifetimePosition end,
-                    LiveRange* range)
-      : start_(start), end_(end), range_(range) {}
-
-  LifetimePosition start_;
-  LifetimePosition end_;
-  LiveRange* range_;
-  bool operator<(const AllocatedInterval& other) const {
-    return start_ < other.start_;
-  }
-  bool operator>(const AllocatedInterval& other) const {
-    return start_ > other.start_;
-  }
-};
-typedef ZoneSet<AllocatedInterval> IntervalStore;
-
-
-// An iterator over conflicts of a live range, obtained from CoalescedLiveRanges
-// The design supports two main scenarios (see GreedyAllocator):
-// (1) observing each conflicting range, without mutating the allocations, and
-// (2) observing each conflicting range, and then moving to the next, after
-// removing the current conflict.
-class LiveRangeConflictIterator {
- public:
-  // Current conflict. nullptr if no conflicts, or if we reached the end of
-  // conflicts.
-  LiveRange* Current() const;
-
-  // Get the next conflict. Caller should handle non-consecutive repetitions of
-  // the same range.
-  LiveRange* GetNext() { return InternalGetNext(false); }
-
-  // Get the next conflict, after evicting the current one. Caller may expect
-  // to never observe the same live range more than once.
-  LiveRange* RemoveCurrentAndGetNext() { return InternalGetNext(true); }
-
- private:
-  friend class CoalescedLiveRanges;
-
-  typedef IntervalStore::const_iterator interval_iterator;
-  LiveRangeConflictIterator(const LiveRange* range, IntervalStore* store);
-
-  // Move the store iterator to  first interval intersecting query. Since the
-  // intervals are sorted, subsequent intervals intersecting query follow. May
-  // leave the store iterator at "end", meaning that the current query does not
-  // have an intersection.
-  void MovePosToFirstConflictForQuery();
-
-  // Move both query and store iterator to the first intersection, if any. If
-  // none, then it invalidates the iterator (IsFinished() == true)
-  void MovePosAndQueryToFirstConflict();
-
-  // Increment pos and skip over intervals belonging to the same range we
-  // started with (i.e. Current() before the call). It is possible that range
-  // will be seen again, but not consecutively.
-  void IncrementPosAndSkipOverRepetitions();
-
-  // Common implementation used by both GetNext as well as
-  // ClearCurrentAndGetNext.
-  LiveRange* InternalGetNext(bool clean_behind);
-
-  bool IsFinished() const { return query_ == nullptr; }
-
-  static AllocatedInterval AsAllocatedInterval(LifetimePosition pos) {
-    return AllocatedInterval(pos, LifetimePosition::Invalid(), nullptr);
-  }
-
-  // Intersection utilities.
-  static bool Intersects(LifetimePosition a_start, LifetimePosition a_end,
-                         LifetimePosition b_start, LifetimePosition b_end) {
-    return a_start < b_end && b_start < a_end;
-  }
-
-  bool QueryIntersectsAllocatedInterval() const {
-    DCHECK_NOT_NULL(query_);
-    return pos_ != intervals_->end() &&
-           Intersects(query_->start(), query_->end(), pos_->start_, pos_->end_);
-  }
-
-  void Invalidate() {
-    query_ = nullptr;
-    pos_ = intervals_->end();
-  }
-
-  const UseInterval* query_;
-  interval_iterator pos_;
-  IntervalStore* intervals_;
-};
-
-// Collection of live ranges allocated to the same register.
-// It supports efficiently finding all conflicts for a given, non-allocated
-// range. See AllocatedInterval.
-// Allocated live ranges do not intersect. At most, individual use intervals
-// touch. We store, for a live range, an AllocatedInterval corresponding to each
-// of that range's UseIntervals. We keep the list of AllocatedIntervals sorted
-// by starts. Then, given the non-intersecting property, we know that
-// consecutive AllocatedIntervals have the property that the "smaller"'s end is
-// less or equal to the "larger"'s start.
-// This allows for quick (logarithmic complexity) identification of the first
-// AllocatedInterval to conflict with a given LiveRange, and then for efficient
-// traversal of conflicts.
-class CoalescedLiveRanges : public ZoneObject {
- public:
-  explicit CoalescedLiveRanges(Zone* zone) : intervals_(zone) {}
-  void clear() { intervals_.clear(); }
-
-  bool empty() const { return intervals_.empty(); }
-
-  // Iterate over each live range conflicting with the provided one.
-  // The same live range may be observed multiple, but non-consecutive times.
-  LiveRangeConflictIterator GetConflicts(const LiveRange* range);
-
-
-  // Allocates a range with a pre-calculated candidate weight.
-  void AllocateRange(LiveRange* range);
-
-  // Unit testing API, verifying that allocated intervals do not overlap.
-  bool VerifyAllocationsAreValidForTesting() const;
-
- private:
-  static const float kAllocatedRangeMultiplier;
-
-  IntervalStore& intervals() { return intervals_; }
-  const IntervalStore& intervals() const { return intervals_; }
-
-  // Augment the weight of a range that is about to be allocated.
-  static void UpdateWeightAtAllocation(LiveRange* range);
-
-  // Reduce the weight of a range that has lost allocation.
-  static void UpdateWeightAtEviction(LiveRange* range);
-
-
-  IntervalStore intervals_;
-  DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges);
-};
-
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
-#endif  // V8_COALESCED_LIVE_RANGES_H_
diff --git a/src/compiler/code-assembler.cc b/src/compiler/code-assembler.cc
index 081f28b..e598c09 100644
--- a/src/compiler/code-assembler.cc
+++ b/src/compiler/code-assembler.cc
@@ -19,6 +19,7 @@
 #include "src/interpreter/bytecodes.h"
 #include "src/machine-type.h"
 #include "src/macro-assembler.h"
+#include "src/utils.h"
 #include "src/zone.h"
 
 namespace v8 {
@@ -160,6 +161,28 @@
   return raw_assembler_->Return(value);
 }
 
+void CodeAssembler::DebugBreak() { raw_assembler_->DebugBreak(); }
+
+void CodeAssembler::Comment(const char* format, ...) {
+  if (!FLAG_code_comments) return;
+  char buffer[4 * KB];
+  StringBuilder builder(buffer, arraysize(buffer));
+  va_list arguments;
+  va_start(arguments, format);
+  builder.AddFormattedList(format, arguments);
+  va_end(arguments);
+
+  // Copy the string before recording it in the assembler to avoid
+  // issues when the stack allocated buffer goes out of scope.
+  const int prefix_len = 2;
+  int length = builder.position() + 1;
+  char* copy = reinterpret_cast<char*>(malloc(length + prefix_len));
+  MemCopy(copy + prefix_len, builder.Finalize(), length);
+  copy[0] = ';';
+  copy[1] = ' ';
+  raw_assembler_->Comment(copy);
+}
+
 void CodeAssembler::Bind(CodeAssembler::Label* label) { return label->Bind(); }
 
 Node* CodeAssembler::LoadFramePointer() {
@@ -392,6 +415,12 @@
                   result_size);
 }
 
+Node* CodeAssembler::CallStubN(Callable const& callable, Node** args,
+                               size_t result_size) {
+  Node* target = HeapConstant(callable.code());
+  return CallStubN(callable.descriptor(), target, args, result_size);
+}
+
 Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
                               Node* target, Node* context, Node* arg1,
                               size_t result_size) {
@@ -479,6 +508,16 @@
   return CallN(call_descriptor, target, args);
 }
 
+Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
+                               Node* target, Node** args, size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  return CallN(call_descriptor, target, args);
+}
+
 Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
                                   Node* arg1, Node* arg2, size_t result_size) {
   Node* target = HeapConstant(callable.code());
@@ -527,6 +566,25 @@
   return raw_assembler_->TailCallN(call_descriptor, target, args);
 }
 
+Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+                                  Node* target, Node* context, Node* arg1,
+                                  Node* arg2, Node* arg3, Node* arg4,
+                                  size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(5);
+  args[0] = arg1;
+  args[1] = arg2;
+  args[2] = arg3;
+  args[3] = arg4;
+  args[4] = context;
+
+  return raw_assembler_->TailCallN(call_descriptor, target, args);
+}
+
 Node* CodeAssembler::TailCallBytecodeDispatch(
     const CallInterfaceDescriptor& interface_descriptor,
     Node* code_target_address, Node** args) {
@@ -536,6 +594,66 @@
   return raw_assembler_->TailCallN(descriptor, code_target_address, args);
 }
 
+Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
+                            Node* function, Node* receiver,
+                            size_t result_size) {
+  const int argc = 0;
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), callable.descriptor(), argc + 1,
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+  Node* target = HeapConstant(callable.code());
+
+  Node** args = zone()->NewArray<Node*>(argc + 4);
+  args[0] = function;
+  args[1] = Int32Constant(argc);
+  args[2] = receiver;
+  args[3] = context;
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
+                            Node* function, Node* receiver, Node* arg1,
+                            size_t result_size) {
+  const int argc = 1;
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), callable.descriptor(), argc + 1,
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+  Node* target = HeapConstant(callable.code());
+
+  Node** args = zone()->NewArray<Node*>(argc + 4);
+  args[0] = function;
+  args[1] = Int32Constant(argc);
+  args[2] = receiver;
+  args[3] = arg1;
+  args[4] = context;
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
+                            Node* function, Node* receiver, Node* arg1,
+                            Node* arg2, size_t result_size) {
+  const int argc = 2;
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), callable.descriptor(), argc + 1,
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+  Node* target = HeapConstant(callable.code());
+
+  Node** args = zone()->NewArray<Node*>(argc + 4);
+  args[0] = function;
+  args[1] = Int32Constant(argc);
+  args[2] = receiver;
+  args[3] = arg1;
+  args[4] = arg2;
+  args[5] = context;
+
+  return CallN(call_descriptor, target, args);
+}
+
 void CodeAssembler::Goto(CodeAssembler::Label* label) {
   label->MergeVariables();
   raw_assembler_->Goto(label->label_);
@@ -598,10 +716,12 @@
 
 CodeAssembler::Variable::Variable(CodeAssembler* assembler,
                                   MachineRepresentation rep)
-    : impl_(new (assembler->zone()) Impl(rep)) {
-  assembler->variables_.push_back(impl_);
+    : impl_(new (assembler->zone()) Impl(rep)), assembler_(assembler) {
+  assembler->variables_.insert(impl_);
 }
 
+CodeAssembler::Variable::~Variable() { assembler_->variables_.erase(impl_); }
+
 void CodeAssembler::Variable::Bind(Node* value) { impl_->value_ = value; }
 
 Node* CodeAssembler::Variable::value() const {
diff --git a/src/compiler/code-assembler.h b/src/compiler/code-assembler.h
index 39af56d..c33605c 100644
--- a/src/compiler/code-assembler.h
+++ b/src/compiler/code-assembler.h
@@ -72,6 +72,7 @@
   V(Float64Mul)                            \
   V(Float64Div)                            \
   V(Float64Mod)                            \
+  V(Float64Atan2)                          \
   V(Float64InsertLowWord32)                \
   V(Float64InsertHighWord32)               \
   V(IntPtrAdd)                             \
@@ -106,8 +107,20 @@
   V(Word64Ror)
 
 #define CODE_ASSEMBLER_UNARY_OP_LIST(V) \
+  V(Float64Atan)                        \
+  V(Float64Atanh)                       \
+  V(Float64Cos)                         \
+  V(Float64Exp)                         \
+  V(Float64Expm1)                       \
+  V(Float64Log)                         \
+  V(Float64Log1p)                       \
+  V(Float64Log2)                        \
+  V(Float64Log10)                       \
+  V(Float64Cbrt)                        \
   V(Float64Neg)                         \
+  V(Float64Sin)                         \
   V(Float64Sqrt)                        \
+  V(Float64Tan)                         \
   V(Float64ExtractLowWord32)            \
   V(Float64ExtractHighWord32)           \
   V(BitcastWordToTagged)                \
@@ -166,6 +179,7 @@
   class Variable {
    public:
     explicit Variable(CodeAssembler* assembler, MachineRepresentation rep);
+    ~Variable();
     void Bind(Node* value);
     Node* value() const;
     MachineRepresentation rep() const;
@@ -175,6 +189,7 @@
     friend class CodeAssembler;
     class Impl;
     Impl* impl_;
+    CodeAssembler* assembler_;
   };
 
   enum AllocationFlag : uint8_t {
@@ -208,6 +223,9 @@
   Node* Parameter(int value);
   void Return(Node* value);
 
+  void DebugBreak();
+  void Comment(const char* format, ...);
+
   void Bind(Label* label);
   void Goto(Label* label);
   void GotoIf(Node* condition, Label* true_label);
@@ -293,6 +311,8 @@
                  Node* arg2, size_t result_size = 1);
   Node* CallStub(Callable const& callable, Node* context, Node* arg1,
                  Node* arg2, Node* arg3, size_t result_size = 1);
+  Node* CallStubN(Callable const& callable, Node** args,
+                  size_t result_size = 1);
 
   Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
                  Node* context, Node* arg1, size_t result_size = 1);
@@ -307,6 +327,8 @@
   Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
                  Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
                  Node* arg5, size_t result_size = 1);
+  Node* CallStubN(const CallInterfaceDescriptor& descriptor, Node* target,
+                  Node** args, size_t result_size = 1);
 
   Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
                      Node* arg2, size_t result_size = 1);
@@ -318,10 +340,20 @@
   Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
                      Node* context, Node* arg1, Node* arg2, Node* arg3,
                      size_t result_size = 1);
+  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                     Node* context, Node* arg1, Node* arg2, Node* arg3,
+                     Node* arg4, size_t result_size = 1);
 
   Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
                                  Node* code_target_address, Node** args);
 
+  Node* CallJS(Callable const& callable, Node* context, Node* function,
+               Node* receiver, size_t result_size = 1);
+  Node* CallJS(Callable const& callable, Node* context, Node* function,
+               Node* receiver, Node* arg1, size_t result_size = 1);
+  Node* CallJS(Callable const& callable, Node* context, Node* function,
+               Node* receiver, Node* arg1, Node* arg2, size_t result_size = 1);
+
   // Branching helpers.
   void BranchIf(Node* condition, Label* if_true, Label* if_false);
 
@@ -348,8 +380,6 @@
   virtual void CallEpilogue();
 
  private:
-  friend class CodeAssemblerTester;
-
   CodeAssembler(Isolate* isolate, Zone* zone, CallDescriptor* call_descriptor,
                 Code::Flags flags, const char* name);
 
@@ -360,7 +390,7 @@
   Code::Flags flags_;
   const char* name_;
   bool code_generated_;
-  ZoneVector<Variable::Impl*> variables_;
+  ZoneSet<Variable::Impl*> variables_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeAssembler);
 };
diff --git a/src/compiler/code-generator-impl.h b/src/compiler/code-generator-impl.h
index adb8400..4e09a27 100644
--- a/src/compiler/code-generator-impl.h
+++ b/src/compiler/code-generator-impl.h
@@ -31,6 +31,10 @@
     return ToRegister(instr_->InputAt(index));
   }
 
+  FloatRegister InputFloatRegister(size_t index) {
+    return ToFloatRegister(instr_->InputAt(index));
+  }
+
   DoubleRegister InputDoubleRegister(size_t index) {
     return ToDoubleRegister(instr_->InputAt(index));
   }
@@ -89,6 +93,10 @@
     return ToRegister(instr_->TempAt(index));
   }
 
+  FloatRegister OutputFloatRegister() {
+    return ToFloatRegister(instr_->Output());
+  }
+
   DoubleRegister OutputDoubleRegister() {
     return ToDoubleRegister(instr_->Output());
   }
@@ -111,6 +119,10 @@
     return LocationOperand::cast(op)->GetDoubleRegister();
   }
 
+  FloatRegister ToFloatRegister(InstructionOperand* op) {
+    return LocationOperand::cast(op)->GetFloatRegister();
+  }
+
   Constant ToConstant(InstructionOperand* op) {
     if (op->IsImmediate()) {
       return gen_->code()->GetImmediate(ImmediateOperand::cast(op));
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
index 5cf9d97..f388659 100644
--- a/src/compiler/code-generator.cc
+++ b/src/compiler/code-generator.cc
@@ -399,10 +399,10 @@
   if (source_position.IsUnknown()) return;
   int code_pos = source_position.raw();
   masm()->positions_recorder()->RecordPosition(code_pos);
-  masm()->positions_recorder()->WriteRecordedPositions();
   if (FLAG_code_comments) {
-    Vector<char> buffer = Vector<char>::New(256);
     CompilationInfo* info = this->info();
+    if (!info->parse_info()) return;
+    Vector<char> buffer = Vector<char>::New(256);
     int ln = Script::GetLineNumber(info->script(), code_pos);
     int cn = Script::GetColumnNumber(info->script(), code_pos);
     if (info->script()->name()->IsString()) {
@@ -716,8 +716,12 @@
       CHECK(false);
     }
   } else if (op->IsFPStackSlot()) {
-    DCHECK(IsFloatingPoint(type.representation()));
-    translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
+    if (type.representation() == MachineRepresentation::kFloat64) {
+      translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
+    } else {
+      DCHECK_EQ(MachineRepresentation::kFloat32, type.representation());
+      translation->StoreFloatStackSlot(LocationOperand::cast(op)->index());
+    }
   } else if (op->IsRegister()) {
     InstructionOperandConverter converter(this, instr);
     if (type.representation() == MachineRepresentation::kBit) {
@@ -734,9 +738,13 @@
       CHECK(false);
     }
   } else if (op->IsFPRegister()) {
-    DCHECK(IsFloatingPoint(type.representation()));
     InstructionOperandConverter converter(this, instr);
-    translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
+    if (type.representation() == MachineRepresentation::kFloat64) {
+      translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
+    } else {
+      DCHECK_EQ(MachineRepresentation::kFloat32, type.representation());
+      translation->StoreFloatRegister(converter.ToFloatRegister(op));
+    }
   } else if (op->IsImmediate()) {
     InstructionOperandConverter converter(this, instr);
     Constant constant = converter.ToConstant(op);
diff --git a/src/compiler/common-node-cache.h b/src/compiler/common-node-cache.h
index cee0c4e..1f07703 100644
--- a/src/compiler/common-node-cache.h
+++ b/src/compiler/common-node-cache.h
@@ -52,12 +52,14 @@
 
   Node** FindHeapConstant(Handle<HeapObject> value);
 
-  Node** FindRelocatableInt32Constant(int32_t value) {
-    return relocatable_int32_constants_.Find(zone(), value);
+  Node** FindRelocatableInt32Constant(int32_t value, RelocInfoMode rmode) {
+    return relocatable_int32_constants_.Find(zone(),
+                                             std::make_pair(value, rmode));
   }
 
-  Node** FindRelocatableInt64Constant(int64_t value) {
-    return relocatable_int64_constants_.Find(zone(), value);
+  Node** FindRelocatableInt64Constant(int64_t value, RelocInfoMode rmode) {
+    return relocatable_int64_constants_.Find(zone(),
+                                             std::make_pair(value, rmode));
   }
 
   // Return all nodes from the cache.
@@ -73,8 +75,8 @@
   IntPtrNodeCache external_constants_;
   Int64NodeCache number_constants_;
   IntPtrNodeCache heap_constants_;
-  Int32NodeCache relocatable_int32_constants_;
-  Int64NodeCache relocatable_int64_constants_;
+  RelocInt32NodeCache relocatable_int32_constants_;
+  RelocInt64NodeCache relocatable_int64_constants_;
   Zone* const zone_;
 
   DISALLOW_COPY_AND_ASSIGN(CommonNodeCache);
diff --git a/src/compiler/common-operator-reducer.cc b/src/compiler/common-operator-reducer.cc
index 2f48683..5c3d3d7 100644
--- a/src/compiler/common-operator-reducer.cc
+++ b/src/compiler/common-operator-reducer.cc
@@ -19,8 +19,6 @@
 
 namespace {
 
-enum class Decision { kUnknown, kTrue, kFalse };
-
 Decision DecideCondition(Node* const cond) {
   switch (cond->opcode()) {
     case IrOpcode::kInt32Constant: {
@@ -142,13 +140,14 @@
   Decision const decision = DecideCondition(condition);
   if (decision == Decision::kUnknown) return NoChange();
   if (condition_is_true == (decision == Decision::kTrue)) {
-    return Replace(control);
+    ReplaceWithValue(node, dead(), effect, control);
+  } else {
+    control = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+                               frame_state, effect, control);
+    // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+    NodeProperties::MergeControlToEnd(graph(), common(), control);
+    Revisit(graph()->end());
   }
-  control = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                             frame_state, effect, control);
-  // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-  NodeProperties::MergeControlToEnd(graph(), common(), control);
-  Revisit(graph()->end());
   return Replace(dead());
 }
 
diff --git a/src/compiler/common-operator.cc b/src/compiler/common-operator.cc
index d3f6972..4f5ead8 100644
--- a/src/compiler/common-operator.cc
+++ b/src/compiler/common-operator.cc
@@ -167,10 +167,44 @@
   return os << p.value() << "|" << p.rmode() << "|" << p.type();
 }
 
+size_t hash_value(RegionObservability observability) {
+  return static_cast<size_t>(observability);
+}
+
+std::ostream& operator<<(std::ostream& os, RegionObservability observability) {
+  switch (observability) {
+    case RegionObservability::kObservable:
+      return os << "observable";
+    case RegionObservability::kNotObservable:
+      return os << "not-observable";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+RegionObservability RegionObservabilityOf(Operator const* op) {
+  DCHECK_EQ(IrOpcode::kBeginRegion, op->opcode());
+  return OpParameter<RegionObservability>(op);
+}
+
+std::ostream& operator<<(std::ostream& os,
+                         const ZoneVector<MachineType>* types) {
+  // Print all the MachineTypes, separated by commas.
+  bool first = true;
+  for (MachineType elem : *types) {
+    if (!first) {
+      os << ", ";
+    }
+    first = false;
+    os << elem;
+  }
+  return os;
+}
+
 #define CACHED_OP_LIST(V)                                    \
   V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1)             \
-  V(DeoptimizeIf, Operator::kFoldable, 2, 1, 1, 0, 0, 1)     \
-  V(DeoptimizeUnless, Operator::kFoldable, 2, 1, 1, 0, 0, 1) \
+  V(DeoptimizeIf, Operator::kFoldable, 2, 1, 1, 0, 1, 1)     \
+  V(DeoptimizeUnless, Operator::kFoldable, 2, 1, 1, 0, 1, 1) \
   V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1)            \
   V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1)           \
   V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1)         \
@@ -179,9 +213,8 @@
   V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1)         \
   V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1)   \
   V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1)     \
-  V(CheckPoint, Operator::kKontrol, 1, 1, 1, 0, 1, 0)        \
-  V(BeginRegion, Operator::kNoThrow, 0, 1, 0, 0, 1, 0)       \
-  V(FinishRegion, Operator::kNoThrow, 1, 1, 0, 1, 1, 0)
+  V(Checkpoint, Operator::kKontrol, 0, 1, 1, 0, 1, 0)        \
+  V(FinishRegion, Operator::kKontrol, 1, 1, 0, 1, 1, 0)
 
 #define CACHED_RETURN_LIST(V) \
   V(1)                        \
@@ -360,6 +393,20 @@
   CACHED_EFFECT_PHI_LIST(CACHED_EFFECT_PHI)
 #undef CACHED_EFFECT_PHI
 
+  template <RegionObservability kRegionObservability>
+  struct BeginRegionOperator final : public Operator1<RegionObservability> {
+    BeginRegionOperator()
+        : Operator1<RegionObservability>(                  // --
+              IrOpcode::kBeginRegion, Operator::kKontrol,  // opcode
+              "BeginRegion",                               // name
+              0, 1, 0, 0, 1, 0,                            // counts
+              kRegionObservability) {}                     // parameter
+  };
+  BeginRegionOperator<RegionObservability::kObservable>
+      kBeginRegionObservableOperator;
+  BeginRegionOperator<RegionObservability::kNotObservable>
+      kBeginRegionNotObservableOperator;
+
   template <size_t kInputCount>
   struct LoopOperator final : public Operator {
     LoopOperator()
@@ -422,7 +469,7 @@
               IrOpcode::kProjection,  // opcode
               Operator::kPure,        // flags
               "Projection",           // name
-              1, 0, 0, 1, 0, 0,       // counts,
+              1, 0, 1, 1, 0, 0,       // counts,
               kIndex) {}              // parameter
   };
 #define CACHED_PROJECTION(index) \
@@ -759,6 +806,17 @@
       0, effect_input_count, 1, 0, 1, 0);     // counts
 }
 
+const Operator* CommonOperatorBuilder::BeginRegion(
+    RegionObservability region_observability) {
+  switch (region_observability) {
+    case RegionObservability::kObservable:
+      return &cache_.kBeginRegionObservableOperator;
+    case RegionObservability::kNotObservable:
+      return &cache_.kBeginRegionNotObservableOperator;
+  }
+  UNREACHABLE();
+  return nullptr;
+}
 
 const Operator* CommonOperatorBuilder::StateValues(int arguments) {
   switch (arguments) {
@@ -857,12 +915,12 @@
       break;
   }
   // Uncached.
-  return new (zone()) Operator1<size_t>(         // --
-      IrOpcode::kProjection,                     // opcode
-      Operator::kFoldable | Operator::kNoThrow,  // flags
-      "Projection",                              // name
-      1, 0, 0, 1, 0, 0,                          // counts
-      index);                                    // parameter
+  return new (zone()) Operator1<size_t>(  // --
+      IrOpcode::kProjection,              // opcode
+      Operator::kPure,                    // flags
+      "Projection",                       // name
+      1, 0, 1, 1, 0, 0,                   // counts
+      index);                             // parameter
 }
 
 
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
index c2a7a37..77d53de 100644
--- a/src/compiler/common-operator.h
+++ b/src/compiler/common-operator.h
@@ -134,9 +134,25 @@
                 RelocatablePtrConstantInfo const& rhs);
 bool operator!=(RelocatablePtrConstantInfo const& lhs,
                 RelocatablePtrConstantInfo const& rhs);
+
 std::ostream& operator<<(std::ostream&, RelocatablePtrConstantInfo const&);
+
 size_t hash_value(RelocatablePtrConstantInfo const& p);
 
+// Used to mark a region (as identified by BeginRegion/FinishRegion) as either
+// JavaScript-observable or not (i.e. allocations are not JavaScript observable
+// themselves, but transitioning stores are).
+enum class RegionObservability : uint8_t { kObservable, kNotObservable };
+
+size_t hash_value(RegionObservability);
+
+std::ostream& operator<<(std::ostream&, RegionObservability);
+
+RegionObservability RegionObservabilityOf(Operator const*) WARN_UNUSED_RESULT;
+
+std::ostream& operator<<(std::ostream& os,
+                         const ZoneVector<MachineType>* types);
+
 // Interface for building common operators that can be used at any level of IR,
 // including JavaScript, mid-level, and low-level.
 class CommonOperatorBuilder final : public ZoneObject {
@@ -186,8 +202,8 @@
   const Operator* Phi(MachineRepresentation representation,
                       int value_input_count);
   const Operator* EffectPhi(int effect_input_count);
-  const Operator* CheckPoint();
-  const Operator* BeginRegion();
+  const Operator* Checkpoint();
+  const Operator* BeginRegion(RegionObservability);
   const Operator* FinishRegion();
   const Operator* StateValues(int arguments);
   const Operator* ObjectState(int pointer_slots, int id);
diff --git a/src/compiler/effect-control-linearizer.cc b/src/compiler/effect-control-linearizer.cc
index 716723b..b7f6b12 100644
--- a/src/compiler/effect-control-linearizer.cc
+++ b/src/compiler/effect-control-linearizer.cc
@@ -37,6 +37,7 @@
 struct BlockEffectControlData {
   Node* current_effect = nullptr;  // New effect.
   Node* current_control = nullptr;  // New control.
+  Node* current_frame_state = nullptr;  // New frame state.
 };
 
 // Effect phis that need to be updated after the first pass.
@@ -222,10 +223,30 @@
       NodeProperties::ReplaceEffectInput(terminate, effect);
     }
 
+    // The frame state at block entry is determined by the frame states leaving
+    // all predecessors. In case there is no frame state dominating this block,
+    // we can rely on a checkpoint being present before the next deoptimization.
+    // TODO(mstarzinger): Eventually we will need to go hunt for a frame state
+    // once deoptimizing nodes roam freely through the schedule.
+    Node* frame_state = nullptr;
+    if (block != schedule()->start()) {
+      // If all the predecessors have the same effect, we can use it
+      // as our current effect.
+      int rpo_number = block->PredecessorAt(0)->rpo_number();
+      frame_state = block_effects[rpo_number].current_frame_state;
+      for (size_t i = 1; i < block->PredecessorCount(); i++) {
+        int rpo_number = block->PredecessorAt(i)->rpo_number();
+        if (block_effects[rpo_number].current_frame_state != frame_state) {
+          frame_state = nullptr;
+          break;
+        }
+      }
+    }
+
     // Process the ordinary instructions.
     for (; instr < block->NodeCount(); instr++) {
       Node* node = block->NodeAt(instr);
-      ProcessNode(node, &effect, &control);
+      ProcessNode(node, &frame_state, &effect, &control);
     }
 
     switch (block->control()) {
@@ -240,13 +261,14 @@
       case BasicBlock::kReturn:
       case BasicBlock::kDeoptimize:
       case BasicBlock::kThrow:
-        ProcessNode(block->control_input(), &effect, &control);
+        ProcessNode(block->control_input(), &frame_state, &effect, &control);
         break;
     }
 
     // Store the effect for later use.
     block_effects[block->rpo_number()].current_effect = effect;
     block_effects[block->rpo_number()].current_control = control;
+    block_effects[block->rpo_number()].current_frame_state = frame_state;
   }
 
   // Update the incoming edges of the effect phis that could not be processed
@@ -276,29 +298,49 @@
 
 }  // namespace
 
-void EffectControlLinearizer::ProcessNode(Node* node, Node** effect,
-                                          Node** control) {
+void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
+                                          Node** effect, Node** control) {
   // If the node needs to be wired into the effect/control chain, do this
-  // here.
-  if (TryWireInStateEffect(node, effect, control)) {
+  // here. Pass current frame state for lowering to eager deoptimization.
+  if (TryWireInStateEffect(node, *frame_state, effect, control)) {
     return;
   }
 
+  // If the node has a visible effect, then there must be a checkpoint in the
+  // effect chain before we are allowed to place another eager deoptimization
+  // point. We zap the frame state to ensure this invariant is maintained.
+  if (region_observability_ == RegionObservability::kObservable &&
+      !node->op()->HasProperty(Operator::kNoWrite)) {
+    *frame_state = nullptr;
+  }
+
   // Remove the end markers of 'atomic' allocation region because the
   // region should be wired-in now.
-  if (node->opcode() == IrOpcode::kFinishRegion ||
-      node->opcode() == IrOpcode::kBeginRegion) {
+  if (node->opcode() == IrOpcode::kFinishRegion) {
+    // Reset the current region observability.
+    region_observability_ = RegionObservability::kObservable;
+    // Update the value uses to the value input of the finish node and
+    // the effect uses to the effect input.
+    return RemoveRegionNode(node);
+  }
+  if (node->opcode() == IrOpcode::kBeginRegion) {
+    // Determine the observability for this region and use that for all
+    // nodes inside the region (i.e. ignore the absence of kNoWrite on
+    // StoreField and other operators).
+    DCHECK_NE(RegionObservability::kNotObservable, region_observability_);
+    region_observability_ = RegionObservabilityOf(node->op());
     // Update the value uses to the value input of the finish node and
     // the effect uses to the effect input.
     return RemoveRegionNode(node);
   }
 
-  // Special treatment for CheckPoint nodes.
-  // TODO(epertoso): Pickup the current frame state.
-  if (node->opcode() == IrOpcode::kCheckPoint) {
+  // Special treatment for checkpoint nodes.
+  if (node->opcode() == IrOpcode::kCheckpoint) {
     // Unlink the check point; effect uses will be updated to the incoming
-    // effect that is passed.
-    node->Kill();
+    // effect that is passed. The frame state is preserved for lowering.
+    DCHECK_EQ(RegionObservability::kObservable, region_observability_);
+    *frame_state = NodeProperties::GetFrameStateInput(node, 0);
+    node->TrimInputCount(0);
     return;
   }
 
@@ -347,7 +389,9 @@
   }
 }
 
-bool EffectControlLinearizer::TryWireInStateEffect(Node* node, Node** effect,
+bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
+                                                   Node* frame_state,
+                                                   Node** effect,
                                                    Node** control) {
   ValueEffectControl state(nullptr, nullptr, nullptr);
   switch (node->opcode()) {
@@ -384,6 +428,36 @@
     case IrOpcode::kChangeTaggedToFloat64:
       state = LowerChangeTaggedToFloat64(node, *effect, *control);
       break;
+    case IrOpcode::kTruncateTaggedToFloat64:
+      state = LowerTruncateTaggedToFloat64(node, *effect, *control);
+      break;
+    case IrOpcode::kCheckBounds:
+      state = LowerCheckBounds(node, frame_state, *effect, *control);
+      break;
+    case IrOpcode::kCheckTaggedPointer:
+      state = LowerCheckTaggedPointer(node, frame_state, *effect, *control);
+      break;
+    case IrOpcode::kCheckTaggedSigned:
+      state = LowerCheckTaggedSigned(node, frame_state, *effect, *control);
+      break;
+    case IrOpcode::kCheckedInt32Add:
+      state = LowerCheckedInt32Add(node, frame_state, *effect, *control);
+      break;
+    case IrOpcode::kCheckedInt32Sub:
+      state = LowerCheckedInt32Sub(node, frame_state, *effect, *control);
+      break;
+    case IrOpcode::kCheckedUint32ToInt32:
+      state = LowerCheckedUint32ToInt32(node, frame_state, *effect, *control);
+      break;
+    case IrOpcode::kCheckedFloat64ToInt32:
+      state = LowerCheckedFloat64ToInt32(node, frame_state, *effect, *control);
+      break;
+    case IrOpcode::kCheckedTaggedToInt32:
+      state = LowerCheckedTaggedToInt32(node, frame_state, *effect, *control);
+      break;
+    case IrOpcode::kCheckedTaggedToFloat64:
+      state = LowerCheckedTaggedToFloat64(node, frame_state, *effect, *control);
+      break;
     case IrOpcode::kTruncateTaggedToWord32:
       state = LowerTruncateTaggedToWord32(node, *effect, *control);
       break;
@@ -405,10 +479,28 @@
     case IrOpcode::kObjectIsUndetectable:
       state = LowerObjectIsUndetectable(node, *effect, *control);
       break;
+    case IrOpcode::kStringFromCharCode:
+      state = LowerStringFromCharCode(node, *effect, *control);
+      break;
+    case IrOpcode::kCheckFloat64Hole:
+      state = LowerCheckFloat64Hole(node, frame_state, *effect, *control);
+      break;
+    case IrOpcode::kCheckTaggedHole:
+      state = LowerCheckTaggedHole(node, frame_state, *effect, *control);
+      break;
+    case IrOpcode::kPlainPrimitiveToNumber:
+      state = LowerPlainPrimitiveToNumber(node, *effect, *control);
+      break;
+    case IrOpcode::kPlainPrimitiveToWord32:
+      state = LowerPlainPrimitiveToWord32(node, *effect, *control);
+      break;
+    case IrOpcode::kPlainPrimitiveToFloat64:
+      state = LowerPlainPrimitiveToFloat64(node, *effect, *control);
+      break;
     default:
       return false;
   }
-  NodeProperties::ReplaceUses(node, state.value);
+  NodeProperties::ReplaceUses(node, state.value, state.effect, state.control);
   *effect = state.effect;
   *control = state.control;
   return true;
@@ -465,10 +557,11 @@
   if (machine()->Is64()) {
     vsmi = ChangeInt32ToSmi(value32);
   } else {
-    Node* smi_tag =
-        graph()->NewNode(machine()->Int32AddWithOverflow(), value32, value32);
+    Node* smi_tag = graph()->NewNode(machine()->Int32AddWithOverflow(), value32,
+                                     value32, if_smi);
 
-    Node* check_ovf = graph()->NewNode(common()->Projection(1), smi_tag);
+    Node* check_ovf =
+        graph()->NewNode(common()->Projection(1), smi_tag, if_smi);
     Node* branch_ovf = graph()->NewNode(common()->Branch(BranchHint::kFalse),
                                         check_ovf, if_smi);
 
@@ -476,7 +569,7 @@
     if_box = graph()->NewNode(common()->Merge(2), if_ovf, if_box);
 
     if_smi = graph()->NewNode(common()->IfFalse(), branch_ovf);
-    vsmi = graph()->NewNode(common()->Projection(0), smi_tag);
+    vsmi = graph()->NewNode(common()->Projection(0), smi_tag, if_smi);
   }
 
   // Allocate the box for the {value}.
@@ -528,9 +621,10 @@
     return ValueEffectControl(ChangeInt32ToSmi(value), effect, control);
   }
 
-  Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value);
+  Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value,
+                               control);
 
-  Node* ovf = graph()->NewNode(common()->Projection(1), add);
+  Node* ovf = graph()->NewNode(common()->Projection(1), add, control);
   Node* branch =
       graph()->NewNode(common()->Branch(BranchHint::kFalse), ovf, control);
 
@@ -539,7 +633,7 @@
       AllocateHeapNumberWithValue(ChangeInt32ToFloat64(value), effect, if_true);
 
   Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* vfalse = graph()->NewNode(common()->Projection(0), add);
+  Node* vfalse = graph()->NewNode(common()->Projection(0), add, if_false);
 
   Node* merge = graph()->NewNode(common()->Merge(2), alloc.control, if_false);
   Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
@@ -661,6 +755,12 @@
 EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::LowerChangeTaggedToFloat64(Node* node, Node* effect,
                                                     Node* control) {
+  return LowerTruncateTaggedToFloat64(node, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node, Node* effect,
+                                                      Node* control) {
   Node* value = node->InputAt(0);
 
   Node* check = ObjectIsSmi(value);
@@ -694,6 +794,288 @@
 }
 
 EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckBounds(Node* node, Node* frame_state,
+                                          Node* effect, Node* control) {
+  Node* index = node->InputAt(0);
+  Node* limit = node->InputAt(1);
+
+  Node* check = graph()->NewNode(machine()->Uint32LessThan(), index, limit);
+  control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                      frame_state, effect, control);
+
+  // Make sure the lowered node does not appear in any use lists.
+  node->TrimInputCount(0);
+
+  return ValueEffectControl(index, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckTaggedPointer(Node* node, Node* frame_state,
+                                                 Node* effect, Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* check = ObjectIsSmi(value);
+  control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
+                                      frame_state, effect, control);
+
+  // Make sure the lowered node does not appear in any use lists.
+  node->TrimInputCount(0);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckTaggedSigned(Node* node, Node* frame_state,
+                                                Node* effect, Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* check = ObjectIsSmi(value);
+  control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                      frame_state, effect, control);
+
+  // Make sure the lowered node does not appear in any use lists.
+  node->TrimInputCount(0);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedInt32Add(Node* node, Node* frame_state,
+                                              Node* effect, Node* control) {
+  Node* lhs = node->InputAt(0);
+  Node* rhs = node->InputAt(1);
+
+  Node* value =
+      graph()->NewNode(machine()->Int32AddWithOverflow(), lhs, rhs, control);
+
+  Node* check = graph()->NewNode(common()->Projection(1), value, control);
+  control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
+                                      frame_state, effect, control);
+
+  value = graph()->NewNode(common()->Projection(0), value, control);
+
+  // Make sure the lowered node does not appear in any use lists.
+  node->TrimInputCount(0);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedInt32Sub(Node* node, Node* frame_state,
+                                              Node* effect, Node* control) {
+  Node* lhs = node->InputAt(0);
+  Node* rhs = node->InputAt(1);
+
+  Node* value =
+      graph()->NewNode(machine()->Int32SubWithOverflow(), lhs, rhs, control);
+
+  Node* check = graph()->NewNode(common()->Projection(1), value, control);
+  control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
+                                      frame_state, effect, control);
+
+  value = graph()->NewNode(common()->Projection(0), value, control);
+
+  // Make sure the lowered node does not appear in any use lists.
+  node->TrimInputCount(0);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
+                                                   Node* frame_state,
+                                                   Node* effect,
+                                                   Node* control) {
+  Node* value = node->InputAt(0);
+  Node* max_int = jsgraph()->Int32Constant(std::numeric_limits<int32_t>::max());
+  Node* is_safe =
+      graph()->NewNode(machine()->Uint32LessThanOrEqual(), value, max_int);
+  control = effect = graph()->NewNode(common()->DeoptimizeUnless(), is_safe,
+                                      frame_state, effect, control);
+
+  // Make sure the lowered node does not appear in any use lists.
+  node->TrimInputCount(0);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::BuildCheckedFloat64ToInt32(Node* value,
+                                                    Node* frame_state,
+                                                    Node* effect,
+                                                    Node* control) {
+  Node* value32 = graph()->NewNode(machine()->RoundFloat64ToInt32(), value);
+  Node* check_same = graph()->NewNode(
+      machine()->Float64Equal(), value,
+      graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
+  control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check_same,
+                                      frame_state, effect, control);
+
+  // Check if {value} is -0.
+  Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
+                                      jsgraph()->Int32Constant(0));
+  Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                       check_zero, control);
+
+  Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
+  Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
+
+  // In case of 0, we need to check the high bits for the IEEE -0 pattern.
+  Node* check_negative = graph()->NewNode(
+      machine()->Int32LessThan(),
+      graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
+      jsgraph()->Int32Constant(0));
+
+  Node* deopt_minus_zero = graph()->NewNode(
+      common()->DeoptimizeIf(), check_negative, frame_state, effect, if_zero);
+
+  Node* merge =
+      graph()->NewNode(common()->Merge(2), deopt_minus_zero, if_notzero);
+
+  effect =
+      graph()->NewNode(common()->EffectPhi(2), deopt_minus_zero, effect, merge);
+
+  return ValueEffectControl(value32, effect, merge);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
+                                                    Node* frame_state,
+                                                    Node* effect,
+                                                    Node* control) {
+  Node* value = node->InputAt(0);
+
+  // Make sure the lowered node does not appear in any use lists.
+  node->TrimInputCount(0);
+
+  return BuildCheckedFloat64ToInt32(value, frame_state, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
+                                                   Node* frame_state,
+                                                   Node* effect,
+                                                   Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* check = ObjectIsSmi(value);
+  Node* branch =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+  // In the Smi case, just convert to int32.
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* etrue = effect;
+  Node* vtrue = ChangeSmiToInt32(value);
+
+  // In the non-Smi case, check the heap numberness, load the number and convert
+  // to int32.
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* efalse = effect;
+  Node* vfalse;
+  {
+    Node* value_map = efalse =
+        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+                         value, efalse, if_false);
+    Node* check = graph()->NewNode(machine()->WordEqual(), value_map,
+                                   jsgraph()->HeapNumberMapConstant());
+    if_false = efalse = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                         frame_state, efalse, if_false);
+    vfalse = efalse = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
+        efalse, if_false);
+    ValueEffectControl state =
+        BuildCheckedFloat64ToInt32(vfalse, frame_state, efalse, if_false);
+    if_false = state.control;
+    efalse = state.effect;
+    vfalse = state.value;
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+                           vtrue, vfalse, control);
+
+  // Make sure the lowered node does not appear in any use lists.
+  node->TrimInputCount(0);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
+    Node* value, Node* frame_state, Node* effect, Node* control) {
+  Node* value_map = effect = graph()->NewNode(
+      simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
+  Node* check_number = graph()->NewNode(machine()->WordEqual(), value_map,
+                                        jsgraph()->HeapNumberMapConstant());
+
+  Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+                                  check_number, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* etrue = effect;
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  // For oddballs also contain the numeric value, let us just check that
+  // we have an oddball here.
+  Node* efalse = effect;
+  Node* instance_type = efalse = graph()->NewNode(
+      simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
+      efalse, if_false);
+  Node* check_oddball =
+      graph()->NewNode(machine()->Word32Equal(), instance_type,
+                       jsgraph()->Int32Constant(ODDBALL_TYPE));
+  if_false = efalse =
+      graph()->NewNode(common()->DeoptimizeUnless(), check_oddball, frame_state,
+                       efalse, if_false);
+  STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+
+  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+
+  Node* result = effect = graph()->NewNode(
+      simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
+      effect, control);
+  return ValueEffectControl(result, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
+                                                     Node* frame_state,
+                                                     Node* effect,
+                                                     Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* check = ObjectIsSmi(value);
+  Node* branch =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+  // In the Smi case, just convert to int32 and then float64.
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* etrue = effect;
+  Node* vtrue = ChangeSmiToInt32(value);
+  vtrue = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue);
+
+  // Otherwise, check heap numberness and load the number.
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  ValueEffectControl number_state = BuildCheckedHeapNumberOrOddballToFloat64(
+      value, frame_state, effect, if_false);
+
+  Node* merge =
+      graph()->NewNode(common()->Merge(2), if_true, number_state.control);
+  Node* effect_phi = graph()->NewNode(common()->EffectPhi(2), etrue,
+                                      number_state.effect, merge);
+  Node* result =
+      graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2), vtrue,
+                       number_state.value, merge);
+
+  // Make sure the lowered node does not appear in any use lists.
+  node->TrimInputCount(0);
+
+  return ValueEffectControl(result, effect_phi, merge);
+}
+
+EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node, Node* effect,
                                                      Node* control) {
   Node* value = node->InputAt(0);
@@ -918,6 +1300,170 @@
 }
 
 EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerStringFromCharCode(Node* node, Node* effect,
+                                                 Node* control) {
+  Node* value = node->InputAt(0);
+
+  // Compute the character code.
+  Node* code =
+      graph()->NewNode(machine()->Word32And(), value,
+                       jsgraph()->Int32Constant(String::kMaxUtf16CodeUnit));
+
+  // Check if the {code} is a one-byte char code.
+  Node* check0 =
+      graph()->NewNode(machine()->Int32LessThanOrEqual(), code,
+                       jsgraph()->Int32Constant(String::kMaxOneByteCharCode));
+  Node* branch0 =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* etrue0 = effect;
+  Node* vtrue0;
+  {
+    // Load the isolate wide single character string cache.
+    Node* cache =
+        jsgraph()->HeapConstant(factory()->single_character_string_cache());
+
+    // Compute the {cache} index for {code}.
+    Node* index =
+        machine()->Is32() ? code : graph()->NewNode(
+                                       machine()->ChangeUint32ToUint64(), code);
+
+    // Check if we have an entry for the {code} in the single character string
+    // cache already.
+    Node* entry = etrue0 = graph()->NewNode(
+        simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()), cache,
+        index, etrue0, if_true0);
+
+    Node* check1 = graph()->NewNode(machine()->WordEqual(), entry,
+                                    jsgraph()->UndefinedConstant());
+    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                     check1, if_true0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* etrue1 = etrue0;
+    Node* vtrue1;
+    {
+      // Allocate a new SeqOneByteString for {code}.
+      vtrue1 = etrue1 = graph()->NewNode(
+          simplified()->Allocate(NOT_TENURED),
+          jsgraph()->Int32Constant(SeqOneByteString::SizeFor(1)), etrue1,
+          if_true1);
+      etrue1 = graph()->NewNode(
+          simplified()->StoreField(AccessBuilder::ForMap()), vtrue1,
+          jsgraph()->HeapConstant(factory()->one_byte_string_map()), etrue1,
+          if_true1);
+      etrue1 = graph()->NewNode(
+          simplified()->StoreField(AccessBuilder::ForNameHashField()), vtrue1,
+          jsgraph()->IntPtrConstant(Name::kEmptyHashField), etrue1, if_true1);
+      etrue1 = graph()->NewNode(
+          simplified()->StoreField(AccessBuilder::ForStringLength()), vtrue1,
+          jsgraph()->SmiConstant(1), etrue1, if_true1);
+      etrue1 = graph()->NewNode(
+          machine()->Store(StoreRepresentation(MachineRepresentation::kWord8,
+                                               kNoWriteBarrier)),
+          vtrue1, jsgraph()->IntPtrConstant(SeqOneByteString::kHeaderSize -
+                                            kHeapObjectTag),
+          code, etrue1, if_true1);
+
+      // Remember it in the {cache}.
+      etrue1 = graph()->NewNode(
+          simplified()->StoreElement(AccessBuilder::ForFixedArrayElement()),
+          cache, index, vtrue1, etrue1, if_true1);
+    }
+
+    // Use the {entry} from the {cache}.
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* efalse1 = etrue0;
+    Node* vfalse1 = entry;
+
+    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    etrue0 =
+        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
+    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                              vtrue1, vfalse1, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* efalse0 = effect;
+  Node* vfalse0;
+  {
+    // Allocate a new SeqTwoByteString for {code}.
+    vfalse0 = efalse0 =
+        graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+                         jsgraph()->Int32Constant(SeqTwoByteString::SizeFor(1)),
+                         efalse0, if_false0);
+    efalse0 = graph()->NewNode(
+        simplified()->StoreField(AccessBuilder::ForMap()), vfalse0,
+        jsgraph()->HeapConstant(factory()->string_map()), efalse0, if_false0);
+    efalse0 = graph()->NewNode(
+        simplified()->StoreField(AccessBuilder::ForNameHashField()), vfalse0,
+        jsgraph()->IntPtrConstant(Name::kEmptyHashField), efalse0, if_false0);
+    efalse0 = graph()->NewNode(
+        simplified()->StoreField(AccessBuilder::ForStringLength()), vfalse0,
+        jsgraph()->SmiConstant(1), efalse0, if_false0);
+    efalse0 = graph()->NewNode(
+        machine()->Store(StoreRepresentation(MachineRepresentation::kWord16,
+                                             kNoWriteBarrier)),
+        vfalse0, jsgraph()->IntPtrConstant(SeqTwoByteString::kHeaderSize -
+                                           kHeapObjectTag),
+        code, efalse0, if_false0);
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                           vtrue0, vfalse0, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckFloat64Hole(Node* node, Node* frame_state,
+                                               Node* effect, Node* control) {
+  // If we reach this point w/o eliminating the {node} that's marked
+  // with allow-return-hole, we cannot do anything, so just deoptimize
+  // in case of the hole NaN (similar to Crankshaft).
+  Node* value = node->InputAt(0);
+  Node* check = graph()->NewNode(
+      machine()->Word32Equal(),
+      graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
+      jsgraph()->Int32Constant(kHoleNanUpper32));
+  control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
+                                      frame_state, effect, control);
+
+  // Make sure the lowered node does not appear in any use lists.
+  node->TrimInputCount(0);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckTaggedHole(Node* node, Node* frame_state,
+                                              Node* effect, Node* control) {
+  CheckTaggedHoleMode mode = CheckTaggedHoleModeOf(node->op());
+  Node* value = node->InputAt(0);
+  Node* check = graph()->NewNode(machine()->WordEqual(), value,
+                                 jsgraph()->TheHoleConstant());
+  switch (mode) {
+    case CheckTaggedHoleMode::kConvertHoleToUndefined:
+      value = graph()->NewNode(
+          common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+          check, jsgraph()->UndefinedConstant(), value);
+      break;
+    case CheckTaggedHoleMode::kNeverReturnHole:
+      control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
+                                          frame_state, effect, control);
+      break;
+  }
+
+  // Make sure the lowered node does not appear in any use lists.
+  node->TrimInputCount(0);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value, Node* effect,
                                                      Node* control) {
   Node* result = effect = graph()->NewNode(
@@ -961,7 +1507,6 @@
   }
   return value;
 }
-
 Node* EffectControlLinearizer::ObjectIsSmi(Node* value) {
   return graph()->NewNode(
       machine()->WordEqual(),
@@ -978,6 +1523,148 @@
   return jsgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
 }
 
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerPlainPrimitiveToNumber(Node* node, Node* effect,
+                                                     Node* control) {
+  Node* value = node->InputAt(0);
+  Node* result = effect =
+      graph()->NewNode(ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(),
+                       value, jsgraph()->NoContextConstant(), effect, control);
+  return ValueEffectControl(result, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerPlainPrimitiveToWord32(Node* node, Node* effect,
+                                                     Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* check0 = ObjectIsSmi(value);
+  Node* branch0 =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* etrue0 = effect;
+  Node* vtrue0 = ChangeSmiToInt32(value);
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* efalse0 = effect;
+  Node* vfalse0;
+  {
+    vfalse0 = efalse0 = graph()->NewNode(
+        ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(), value,
+        jsgraph()->NoContextConstant(), efalse0, if_false0);
+
+    Node* check1 = ObjectIsSmi(vfalse0);
+    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* etrue1 = efalse0;
+    Node* vtrue1 = ChangeSmiToInt32(vfalse0);
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* efalse1 = efalse0;
+    Node* vfalse1;
+    {
+      vfalse1 = efalse1 = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), efalse0,
+          efalse1, if_false1);
+      vfalse1 = graph()->NewNode(machine()->TruncateFloat64ToWord32(), vfalse1);
+    }
+
+    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    efalse0 =
+        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+    vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+                               vtrue1, vfalse1, if_false0);
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+                           vtrue0, vfalse0, control);
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerPlainPrimitiveToFloat64(Node* node, Node* effect,
+                                                      Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* check0 = ObjectIsSmi(value);
+  Node* branch0 =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* etrue0 = effect;
+  Node* vtrue0;
+  {
+    vtrue0 = ChangeSmiToInt32(value);
+    vtrue0 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* efalse0 = effect;
+  Node* vfalse0;
+  {
+    vfalse0 = efalse0 = graph()->NewNode(
+        ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(), value,
+        jsgraph()->NoContextConstant(), efalse0, if_false0);
+
+    Node* check1 = ObjectIsSmi(vfalse0);
+    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* etrue1 = efalse0;
+    Node* vtrue1;
+    {
+      vtrue1 = ChangeSmiToInt32(vfalse0);
+      vtrue1 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue1);
+    }
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* efalse1 = efalse0;
+    Node* vfalse1;
+    {
+      vfalse1 = efalse1 = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), efalse0,
+          efalse1, if_false1);
+    }
+
+    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    efalse0 =
+        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+    vfalse0 =
+        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                         vtrue1, vfalse1, if_false0);
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                           vtrue0, vfalse0, control);
+  return ValueEffectControl(value, effect, control);
+}
+
+Factory* EffectControlLinearizer::factory() const {
+  return isolate()->factory();
+}
+
+Isolate* EffectControlLinearizer::isolate() const {
+  return jsgraph()->isolate();
+}
+
+Operator const* EffectControlLinearizer::ToNumberOperator() {
+  if (!to_number_operator_.is_set()) {
+    Callable callable = CodeFactory::ToNumber(isolate());
+    CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+        isolate(), graph()->zone(), callable.descriptor(), 0, flags,
+        Operator::kNoThrow);
+    to_number_operator_.set(common()->Call(desc));
+  }
+  return to_number_operator_.get();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/effect-control-linearizer.h b/src/compiler/effect-control-linearizer.h
index 7d7f938..280b4b7 100644
--- a/src/compiler/effect-control-linearizer.h
+++ b/src/compiler/effect-control-linearizer.h
@@ -30,7 +30,8 @@
   void Run();
 
  private:
-  void ProcessNode(Node* node, Node** current_effect, Node** control);
+  void ProcessNode(Node* node, Node** frame_state, Node** effect,
+                   Node** control);
 
   struct ValueEffectControl {
     Node* value;
@@ -40,7 +41,8 @@
         : value(value), effect(effect), control(control) {}
   };
 
-  bool TryWireInStateEffect(Node* node, Node** effect, Node** control);
+  bool TryWireInStateEffect(Node* node, Node* frame_state, Node** effect,
+                            Node** control);
   ValueEffectControl LowerTypeGuard(Node* node, Node* effect, Node* control);
   ValueEffectControl LowerChangeBitToTagged(Node* node, Node* effect,
                                             Node* control);
@@ -60,8 +62,28 @@
                                               Node* control);
   ValueEffectControl LowerChangeTaggedToUint32(Node* node, Node* effect,
                                                Node* control);
+  ValueEffectControl LowerCheckBounds(Node* node, Node* frame_state,
+                                      Node* effect, Node* control);
+  ValueEffectControl LowerCheckTaggedPointer(Node* node, Node* frame_state,
+                                             Node* effect, Node* control);
+  ValueEffectControl LowerCheckTaggedSigned(Node* node, Node* frame_state,
+                                            Node* effect, Node* control);
+  ValueEffectControl LowerCheckedInt32Add(Node* node, Node* frame_state,
+                                          Node* effect, Node* control);
+  ValueEffectControl LowerCheckedInt32Sub(Node* node, Node* frame_state,
+                                          Node* effect, Node* control);
+  ValueEffectControl LowerCheckedUint32ToInt32(Node* node, Node* frame_state,
+                                               Node* effect, Node* control);
+  ValueEffectControl LowerCheckedFloat64ToInt32(Node* node, Node* frame_state,
+                                                Node* effect, Node* control);
+  ValueEffectControl LowerCheckedTaggedToInt32(Node* node, Node* frame_state,
+                                               Node* effect, Node* control);
+  ValueEffectControl LowerCheckedTaggedToFloat64(Node* node, Node* frame_state,
+                                                 Node* effect, Node* control);
   ValueEffectControl LowerChangeTaggedToFloat64(Node* node, Node* effect,
                                                 Node* control);
+  ValueEffectControl LowerTruncateTaggedToFloat64(Node* node, Node* effect,
+                                                  Node* control);
   ValueEffectControl LowerTruncateTaggedToWord32(Node* node, Node* effect,
                                                  Node* control);
   ValueEffectControl LowerObjectIsCallable(Node* node, Node* effect,
@@ -75,8 +97,27 @@
                                          Node* control);
   ValueEffectControl LowerObjectIsUndetectable(Node* node, Node* effect,
                                                Node* control);
+  ValueEffectControl LowerStringFromCharCode(Node* node, Node* effect,
+                                             Node* control);
+  ValueEffectControl LowerCheckFloat64Hole(Node* node, Node* frame_state,
+                                           Node* effect, Node* control);
+  ValueEffectControl LowerCheckTaggedHole(Node* node, Node* frame_state,
+                                          Node* effect, Node* control);
+  ValueEffectControl LowerPlainPrimitiveToNumber(Node* node, Node* effect,
+                                                 Node* control);
+  ValueEffectControl LowerPlainPrimitiveToWord32(Node* node, Node* effect,
+                                                 Node* control);
+  ValueEffectControl LowerPlainPrimitiveToFloat64(Node* node, Node* effect,
+                                                  Node* control);
+
   ValueEffectControl AllocateHeapNumberWithValue(Node* node, Node* effect,
                                                  Node* control);
+  ValueEffectControl BuildCheckedFloat64ToInt32(Node* value, Node* frame_state,
+                                                Node* effect, Node* control);
+  ValueEffectControl BuildCheckedHeapNumberOrOddballToFloat64(Node* value,
+                                                              Node* frame_state,
+                                                              Node* effect,
+                                                              Node* control);
 
   Node* ChangeInt32ToSmi(Node* value);
   Node* ChangeUint32ToSmi(Node* value);
@@ -88,6 +129,8 @@
   Node* SmiMaxValueConstant();
   Node* SmiShiftBitsConstant();
 
+  Factory* factory() const;
+  Isolate* isolate() const;
   JSGraph* jsgraph() const { return js_graph_; }
   Graph* graph() const;
   Schedule* schedule() const { return schedule_; }
@@ -96,9 +139,14 @@
   SimplifiedOperatorBuilder* simplified() const;
   MachineOperatorBuilder* machine() const;
 
+  Operator const* ToNumberOperator();
+
   JSGraph* js_graph_;
   Schedule* schedule_;
   Zone* temp_zone_;
+  RegionObservability region_observability_ = RegionObservability::kObservable;
+
+  SetOncePointer<Operator const> to_number_operator_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/escape-analysis.cc b/src/compiler/escape-analysis.cc
index d11c3ab..9409a27 100644
--- a/src/compiler/escape-analysis.cc
+++ b/src/compiler/escape-analysis.cc
@@ -794,6 +794,12 @@
         break;
       case IrOpcode::kSelect:
       case IrOpcode::kTypeGuard:
+      // TODO(mstarzinger): The following list of operators will eventually be
+      // handled by the EscapeAnalysisReducer (similar to ObjectIsSmi).
+      case IrOpcode::kObjectIsCallable:
+      case IrOpcode::kObjectIsNumber:
+      case IrOpcode::kObjectIsString:
+      case IrOpcode::kObjectIsUndetectable:
         if (SetEscaped(rep)) {
           TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
                 rep->id(), rep->op()->mnemonic(), use->id(),
@@ -843,6 +849,7 @@
 EscapeAnalysis::EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common,
                                Zone* zone)
     : zone_(zone),
+      slot_not_analyzed_(graph->NewNode(common->NumberConstant(0x1c0debad))),
       common_(common),
       status_analysis_(new (zone) EscapeStatusAnalysis(this, graph, zone)),
       virtual_states_(zone),
@@ -1321,11 +1328,24 @@
   return false;
 }
 
-int EscapeAnalysis::OffsetFromAccess(Node* node) {
-  DCHECK(OpParameter<FieldAccess>(node).offset % kPointerSize == 0);
-  return OpParameter<FieldAccess>(node).offset / kPointerSize;
+namespace {
+
+int OffsetForFieldAccess(Node* node) {
+  FieldAccess access = FieldAccessOf(node->op());
+  DCHECK_EQ(access.offset % kPointerSize, 0);
+  return access.offset / kPointerSize;
 }
 
+int OffsetForElementAccess(Node* node, int index) {
+  ElementAccess access = ElementAccessOf(node->op());
+  DCHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
+            kPointerSizeLog2);
+  DCHECK_EQ(access.header_size % kPointerSize, 0);
+  return access.header_size / kPointerSize + index;
+}
+
+}  // namespace
+
 void EscapeAnalysis::ProcessLoadFromPhi(int offset, Node* from, Node* load,
                                         VirtualState* state) {
   TRACE("Load #%d from phi #%d", load->id(), from->id());
@@ -1368,11 +1388,9 @@
   Node* from = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
   VirtualState* state = virtual_states_[node->id()];
   if (VirtualObject* object = GetVirtualObject(state, from)) {
-    int offset = OffsetFromAccess(node);
-    if (!object->IsTracked() ||
-        static_cast<size_t>(offset) >= object->field_count()) {
-      return;
-    }
+    if (!object->IsTracked()) return;
+    int offset = OffsetForFieldAccess(node);
+    if (static_cast<size_t>(offset) >= object->field_count()) return;
     Node* value = object->GetField(offset);
     if (value) {
       value = ResolveReplacement(value);
@@ -1380,8 +1398,8 @@
     // Record that the load has this alias.
     UpdateReplacement(state, node, value);
   } else if (from->opcode() == IrOpcode::kPhi &&
-             OpParameter<FieldAccess>(node).offset % kPointerSize == 0) {
-    int offset = OffsetFromAccess(node);
+             FieldAccessOf(node->op()).offset % kPointerSize == 0) {
+    int offset = OffsetForFieldAccess(node);
     // Only binary phis are supported for now.
     ProcessLoadFromPhi(offset, from, node, state);
   } else {
@@ -1400,19 +1418,11 @@
          index_node->opcode() != IrOpcode::kInt64Constant &&
          index_node->opcode() != IrOpcode::kFloat32Constant &&
          index_node->opcode() != IrOpcode::kFloat64Constant);
-  ElementAccess access = OpParameter<ElementAccess>(node);
   if (index.HasValue()) {
-    int offset = index.Value() + access.header_size / kPointerSize;
     if (VirtualObject* object = GetVirtualObject(state, from)) {
-      CHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
-               kPointerSizeLog2);
-      CHECK_EQ(access.header_size % kPointerSize, 0);
-
-      if (!object->IsTracked() ||
-          static_cast<size_t>(offset) >= object->field_count()) {
-        return;
-      }
-
+      if (!object->IsTracked()) return;
+      int offset = OffsetForElementAccess(node, index.Value());
+      if (static_cast<size_t>(offset) >= object->field_count()) return;
       Node* value = object->GetField(offset);
       if (value) {
         value = ResolveReplacement(value);
@@ -1420,8 +1430,7 @@
       // Record that the load has this alias.
       UpdateReplacement(state, node, value);
     } else if (from->opcode() == IrOpcode::kPhi) {
-      ElementAccess access = OpParameter<ElementAccess>(node);
-      int offset = index.Value() + access.header_size / kPointerSize;
+      int offset = OffsetForElementAccess(node, index.Value());
       ProcessLoadFromPhi(offset, from, node, state);
     } else {
       UpdateReplacement(state, node, nullptr);
@@ -1443,14 +1452,23 @@
   ForwardVirtualState(node);
   Node* to = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
   VirtualState* state = virtual_states_[node->id()];
-  VirtualObject* obj = GetVirtualObject(state, to);
-  int offset = OffsetFromAccess(node);
-  if (obj && obj->IsTracked() &&
-      static_cast<size_t>(offset) < obj->field_count()) {
+  if (VirtualObject* object = GetVirtualObject(state, to)) {
+    if (!object->IsTracked()) return;
+    int offset = OffsetForFieldAccess(node);
+    if (static_cast<size_t>(offset) >= object->field_count()) return;
     Node* val = ResolveReplacement(NodeProperties::GetValueInput(node, 1));
-    if (obj->GetField(offset) != val) {
-      obj = CopyForModificationAt(obj, state, node);
-      obj->SetField(offset, val);
+    // TODO(mstarzinger): The following is a workaround to not track the code
+    // entry field in virtual JSFunction objects. We only ever store the inner
+    // pointer into the compile lazy stub in this field and the deoptimizer has
+    // this assumption hard-coded in {TranslatedState::MaterializeAt} as well.
+    if (val->opcode() == IrOpcode::kInt32Constant ||
+        val->opcode() == IrOpcode::kInt64Constant) {
+      DCHECK_EQ(JSFunction::kCodeEntryOffset, FieldAccessOf(node->op()).offset);
+      val = slot_not_analyzed_;
+    }
+    if (object->GetField(offset) != val) {
+      object = CopyForModificationAt(object, state, node);
+      object->SetField(offset, val);
     }
   }
 }
@@ -1465,20 +1483,16 @@
          index_node->opcode() != IrOpcode::kInt64Constant &&
          index_node->opcode() != IrOpcode::kFloat32Constant &&
          index_node->opcode() != IrOpcode::kFloat64Constant);
-  ElementAccess access = OpParameter<ElementAccess>(node);
   VirtualState* state = virtual_states_[node->id()];
-  VirtualObject* obj = GetVirtualObject(state, to);
   if (index.HasValue()) {
-    int offset = index.Value() + access.header_size / kPointerSize;
-    if (obj && obj->IsTracked() &&
-        static_cast<size_t>(offset) < obj->field_count()) {
-      CHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
-               kPointerSizeLog2);
-      CHECK_EQ(access.header_size % kPointerSize, 0);
+    if (VirtualObject* object = GetVirtualObject(state, to)) {
+      if (!object->IsTracked()) return;
+      int offset = OffsetForElementAccess(node, index.Value());
+      if (static_cast<size_t>(offset) >= object->field_count()) return;
       Node* val = ResolveReplacement(NodeProperties::GetValueInput(node, 2));
-      if (obj->GetField(offset) != val) {
-        obj = CopyForModificationAt(obj, state, node);
-        obj->SetField(offset, val);
+      if (object->GetField(offset) != val) {
+        object = CopyForModificationAt(object, state, node);
+        object->SetField(offset, val);
       }
     }
   } else {
@@ -1490,12 +1504,13 @@
           to->id(), to->op()->mnemonic(), node->id(), index_node->id(),
           index_node->op()->mnemonic());
     }
-    if (obj && obj->IsTracked()) {
-      if (!obj->AllFieldsClear()) {
-        obj = CopyForModificationAt(obj, state, node);
-        obj->ClearAllFields();
+    if (VirtualObject* object = GetVirtualObject(state, to)) {
+      if (!object->IsTracked()) return;
+      if (!object->AllFieldsClear()) {
+        object = CopyForModificationAt(object, state, node);
+        object->ClearAllFields();
         TRACE("Cleared all fields of @%d:#%d\n",
-              status_analysis_->GetAlias(obj->id()), obj->id());
+              status_analysis_->GetAlias(object->id()), object->id());
       }
     }
   }
diff --git a/src/compiler/escape-analysis.h b/src/compiler/escape-analysis.h
index 139abd7..839e54c 100644
--- a/src/compiler/escape-analysis.h
+++ b/src/compiler/escape-analysis.h
@@ -51,7 +51,6 @@
                           VirtualState* states);
 
   void ForwardVirtualState(Node* node);
-  int OffsetFromAccess(Node* node);
   VirtualState* CopyForModificationAt(VirtualState* state, Node* node);
   VirtualObject* CopyForModificationAt(VirtualObject* obj, VirtualState* state,
                                        Node* node);
@@ -71,6 +70,7 @@
   CommonOperatorBuilder* common() const { return common_; }
 
   Zone* const zone_;
+  Node* const slot_not_analyzed_;
   CommonOperatorBuilder* const common_;
   EscapeStatusAnalysis* status_analysis_;
   ZoneVector<VirtualState*> virtual_states_;
diff --git a/src/compiler/gap-resolver.cc b/src/compiler/gap-resolver.cc
index 9403d35..7c39700 100644
--- a/src/compiler/gap-resolver.cc
+++ b/src/compiler/gap-resolver.cc
@@ -75,7 +75,7 @@
   // This move's source may have changed due to swaps to resolve cycles and so
   // it may now be the last move in the cycle.  If so remove it.
   InstructionOperand source = move->source();
-  if (source.EqualsCanonicalized(destination)) {
+  if (source.InterferesWith(destination)) {
     move->Eliminate();
     return;
   }
diff --git a/src/compiler/graph-reducer.h b/src/compiler/graph-reducer.h
index 683c345..2ac60a6 100644
--- a/src/compiler/graph-reducer.h
+++ b/src/compiler/graph-reducer.h
@@ -74,8 +74,7 @@
     virtual void Revisit(Node* node) = 0;
     // Replace value uses of {node} with {value} and effect uses of {node} with
     // {effect}. If {effect == nullptr}, then use the effect input to {node}.
-    // All
-    // control uses will be relaxed assuming {node} cannot throw.
+    // All control uses will be relaxed assuming {node} cannot throw.
     virtual void ReplaceWithValue(Node* node, Node* value, Node* effect,
                                   Node* control) = 0;
   };
diff --git a/src/compiler/graph-visualizer.cc b/src/compiler/graph-visualizer.cc
index 1dc38df..2e39764 100644
--- a/src/compiler/graph-visualizer.cc
+++ b/src/compiler/graph-visualizer.cc
@@ -36,14 +36,34 @@
   } else {
     SNPrintF(filename, "turbo-none-%s", phase);
   }
+  EmbeddedVector<char, 256> source_file(0);
+  bool source_available = false;
+  if (FLAG_trace_file_names && info->parse_info()) {
+    Object* source_name = info->script()->name();
+    if (source_name->IsString()) {
+      String* str = String::cast(source_name);
+      if (str->length() > 0) {
+        SNPrintF(source_file, "%s", str->ToCString().get());
+        std::replace(source_file.start(),
+                     source_file.start() + source_file.length(), '/', '_');
+        source_available = true;
+      }
+    }
+  }
   std::replace(filename.start(), filename.start() + filename.length(), ' ',
                '_');
 
   EmbeddedVector<char, 256> full_filename;
-  if (phase == nullptr) {
+  if (phase == nullptr && !source_available) {
     SNPrintF(full_filename, "%s.%s", filename.start(), suffix);
-  } else {
+  } else if (phase != nullptr && !source_available) {
     SNPrintF(full_filename, "%s-%s.%s", filename.start(), phase, suffix);
+  } else if (phase == nullptr && source_available) {
+    SNPrintF(full_filename, "%s_%s.%s", filename.start(), source_file.start(),
+             suffix);
+  } else {
+    SNPrintF(full_filename, "%s_%s-%s.%s", filename.start(),
+             source_file.start(), phase, suffix);
   }
 
   char* buffer = new char[full_filename.length() + 1];
@@ -494,9 +514,8 @@
       for (int j = instruction_block->first_instruction_index();
            j <= instruction_block->last_instruction_index(); j++) {
         PrintIndent();
-        PrintableInstruction printable = {
-            RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
-            instructions->InstructionAt(j)};
+        PrintableInstruction printable = {RegisterConfiguration::Turbofan(),
+                                          instructions->InstructionAt(j)};
         os_ << j << " " << printable << " <|@\n";
       }
     }
@@ -539,13 +558,17 @@
     os_ << vreg << ":" << range->relative_id() << " " << type;
     if (range->HasRegisterAssigned()) {
       AllocatedOperand op = AllocatedOperand::cast(range->GetAssignedOperand());
-      if (op.IsFPRegister()) {
-        DoubleRegister assigned_reg = op.GetDoubleRegister();
-        os_ << " \"" << assigned_reg.ToString() << "\"";
+      const auto config = RegisterConfiguration::Turbofan();
+      if (op.IsRegister()) {
+        os_ << " \"" << config->GetGeneralRegisterName(op.register_code())
+            << "\"";
+      } else if (op.IsDoubleRegister()) {
+        os_ << " \"" << config->GetDoubleRegisterName(op.register_code())
+            << "\"";
       } else {
-        DCHECK(op.IsRegister());
-        Register assigned_reg = op.GetRegister();
-        os_ << " \"" << assigned_reg.ToString() << "\"";
+        DCHECK(op.IsFloatRegister());
+        os_ << " \"" << config->GetFloatRegisterName(op.register_code())
+            << "\"";
       }
     } else if (range->spilled()) {
       const TopLevelLiveRange* top = range->TopLevel();
@@ -618,6 +641,20 @@
 std::ostream& operator<<(std::ostream& os, const AsRPO& ar) {
   base::AccountingAllocator allocator;
   Zone local_zone(&allocator);
+
+  // Do a post-order depth-first search on the RPO graph. For every node,
+  // print:
+  //
+  //   - the node id
+  //   - the operator mnemonic
+  //   - in square brackets its parameter (if present)
+  //   - in parentheses the list of argument ids and their mnemonics
+  //   - the node type (if it is typed)
+
+  // Post-order guarantees that all inputs of a node will be printed before
+  // the node itself, if there are no cycles. Any cycles are broken
+  // arbitrarily.
+
   ZoneVector<byte> state(ar.graph.NodeCount(), kUnvisited, &local_zone);
   ZoneStack<Node*> stack(&local_zone);
 
@@ -638,12 +675,14 @@
       state[n->id()] = kVisited;
       stack.pop();
       os << "#" << n->id() << ":" << *n->op() << "(";
+      // Print the inputs.
       int j = 0;
       for (Node* const i : n->inputs()) {
         if (j++ > 0) os << ", ";
         os << "#" << SafeId(i) << ":" << SafeMnemonic(i);
       }
       os << ")";
+      // Print the node type, if any.
       if (NodeProperties::IsTyped(n)) {
         os << "  [Type: ";
         NodeProperties::GetType(n)->PrintTo(os);
diff --git a/src/compiler/graph.h b/src/compiler/graph.h
index 958a15d..a694a0b 100644
--- a/src/compiler/graph.h
+++ b/src/compiler/graph.h
@@ -28,11 +28,30 @@
 // out-of-line data associated with each node.
 typedef uint32_t NodeId;
 
-
-class Graph : public ZoneObject {
+class Graph final : public ZoneObject {
  public:
   explicit Graph(Zone* zone);
 
+  // Scope used when creating a subgraph for inlining. Automatically preserves
+  // the original start and end nodes of the graph, and resets them when you
+  // leave the scope.
+  class SubgraphScope final {
+   public:
+    explicit SubgraphScope(Graph* graph)
+        : graph_(graph), start_(graph->start()), end_(graph->end()) {}
+    ~SubgraphScope() {
+      graph_->SetStart(start_);
+      graph_->SetEnd(end_);
+    }
+
+   private:
+    Graph* const graph_;
+    Node* const start_;
+    Node* const end_;
+
+    DISALLOW_COPY_AND_ASSIGN(SubgraphScope);
+  };
+
   // Base implementation used by all factory methods.
   Node* NewNodeUnchecked(const Operator* op, int input_count,
                          Node* const* inputs, bool incomplete = false);
diff --git a/src/compiler/greedy-allocator.cc b/src/compiler/greedy-allocator.cc
deleted file mode 100644
index 683b75d..0000000
--- a/src/compiler/greedy-allocator.cc
+++ /dev/null
@@ -1,629 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/greedy-allocator.h"
-#include "src/compiler/register-allocator.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-#define TRACE(...)                             \
-  do {                                         \
-    if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
-  } while (false)
-
-
-const float GreedyAllocator::kAllocatedRangeMultiplier = 10.0;
-
-
-namespace {
-
-void UpdateOperands(LiveRange* range, RegisterAllocationData* data) {
-  int reg_id = range->assigned_register();
-  range->SetUseHints(reg_id);
-  if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
-    data->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg_id);
-  }
-}
-
-
-void UnsetOperands(LiveRange* range, RegisterAllocationData* data) {
-  range->UnsetUseHints();
-  if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
-    data->GetPhiMapValueFor(range->TopLevel())->UnsetAssignedRegister();
-  }
-}
-
-
-LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
-                 LifetimePosition pos) {
-  DCHECK(range->Start() < pos && pos < range->End());
-  DCHECK(pos.IsStart() || pos.IsGapPosition() ||
-         (data->code()
-              ->GetInstructionBlock(pos.ToInstructionIndex())
-              ->last_instruction_index() != pos.ToInstructionIndex()));
-  LiveRange* result = range->SplitAt(pos, data->allocation_zone());
-  return result;
-}
-
-
-}  // namespace
-
-
-AllocationCandidate AllocationScheduler::GetNext() {
-  DCHECK(!queue_.empty());
-  AllocationCandidate ret = queue_.top();
-  queue_.pop();
-  return ret;
-}
-
-
-void AllocationScheduler::Schedule(LiveRange* range) {
-  TRACE("Scheduling live range %d:%d.\n", range->TopLevel()->vreg(),
-        range->relative_id());
-  queue_.push(AllocationCandidate(range));
-}
-
-
-void AllocationScheduler::Schedule(LiveRangeGroup* group) {
-  queue_.push(AllocationCandidate(group));
-}
-
-GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
-                                 RegisterKind kind, Zone* local_zone)
-    : RegisterAllocator(data, kind),
-      local_zone_(local_zone),
-      allocations_(local_zone),
-      scheduler_(local_zone),
-      groups_(local_zone) {}
-
-
-void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
-  TRACE("Assigning register %s to live range %d:%d\n", RegisterName(reg_id),
-        range->TopLevel()->vreg(), range->relative_id());
-
-  DCHECK(!range->HasRegisterAssigned());
-
-  AllocateRegisterToRange(reg_id, range);
-
-  TRACE("Assigning %s to range %d%d.\n", RegisterName(reg_id),
-        range->TopLevel()->vreg(), range->relative_id());
-  range->set_assigned_register(reg_id);
-  UpdateOperands(range, data());
-}
-
-
-void GreedyAllocator::PreallocateFixedRanges() {
-  allocations_.resize(num_registers());
-  for (int i = 0; i < num_registers(); i++) {
-    allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone());
-  }
-
-  for (LiveRange* fixed_range : GetFixedRegisters()) {
-    if (fixed_range != nullptr) {
-      DCHECK_EQ(mode(), fixed_range->kind());
-      DCHECK(fixed_range->TopLevel()->IsFixed());
-
-      int reg_nr = fixed_range->assigned_register();
-      EnsureValidRangeWeight(fixed_range);
-      AllocateRegisterToRange(reg_nr, fixed_range);
-    }
-  }
-}
-
-
-void GreedyAllocator::GroupLiveRanges() {
-  CoalescedLiveRanges grouper(local_zone());
-  for (TopLevelLiveRange* range : data()->live_ranges()) {
-    grouper.clear();
-    // Skip splinters, because we do not want to optimize for them, and moves
-    // due to assigning them to different registers occur in deferred blocks.
-    if (!CanProcessRange(range) || range->IsSplinter() || !range->is_phi()) {
-      continue;
-    }
-
-    // A phi can't be a memory operand, so it couldn't have been split.
-    DCHECK(!range->spilled());
-
-    // Maybe this phi range is itself an input to another phi which was already
-    // processed.
-    LiveRangeGroup* latest_grp = range->group() != nullptr
-                                     ? range->group()
-                                     : new (local_zone())
-                                           LiveRangeGroup(local_zone());
-
-    // Populate the grouper.
-    if (range->group() == nullptr) {
-      grouper.AllocateRange(range);
-    } else {
-      for (LiveRange* member : range->group()->ranges()) {
-        grouper.AllocateRange(member);
-      }
-    }
-    for (int j : data()->GetPhiMapValueFor(range)->phi()->operands()) {
-      // skip output also in input, which may happen for loops.
-      if (j == range->vreg()) continue;
-
-      TopLevelLiveRange* other_top = data()->live_ranges()[j];
-
-      if (other_top->IsSplinter()) continue;
-      // If the other was a memory operand, it might have been split.
-      // So get the unsplit part.
-      LiveRange* other =
-          other_top->next() == nullptr ? other_top : other_top->next();
-
-      if (other->spilled()) continue;
-
-      LiveRangeGroup* other_group = other->group();
-      if (other_group != nullptr) {
-        bool can_merge = true;
-        for (LiveRange* member : other_group->ranges()) {
-          if (grouper.GetConflicts(member).Current() != nullptr) {
-            can_merge = false;
-            break;
-          }
-        }
-        // If each member doesn't conflict with the current group, then since
-        // the members don't conflict with eachother either, we can merge them.
-        if (can_merge) {
-          latest_grp->ranges().insert(latest_grp->ranges().end(),
-                                      other_group->ranges().begin(),
-                                      other_group->ranges().end());
-          for (LiveRange* member : other_group->ranges()) {
-            grouper.AllocateRange(member);
-            member->set_group(latest_grp);
-          }
-          // Clear the other range, so we avoid scheduling it.
-          other_group->ranges().clear();
-        }
-      } else if (grouper.GetConflicts(other).Current() == nullptr) {
-        grouper.AllocateRange(other);
-        latest_grp->ranges().push_back(other);
-        other->set_group(latest_grp);
-      }
-    }
-
-    if (latest_grp->ranges().size() > 0 && range->group() == nullptr) {
-      latest_grp->ranges().push_back(range);
-      DCHECK(latest_grp->ranges().size() > 1);
-      groups().push_back(latest_grp);
-      range->set_group(latest_grp);
-    }
-  }
-}
-
-
-void GreedyAllocator::ScheduleAllocationCandidates() {
-  for (LiveRangeGroup* group : groups()) {
-    if (group->ranges().size() > 0) {
-      // We shouldn't have added single-range groups.
-      DCHECK(group->ranges().size() != 1);
-      scheduler().Schedule(group);
-    }
-  }
-  for (LiveRange* range : data()->live_ranges()) {
-    if (CanProcessRange(range)) {
-      for (LiveRange* child = range; child != nullptr; child = child->next()) {
-        if (!child->spilled() && child->group() == nullptr) {
-          scheduler().Schedule(child);
-        }
-      }
-    }
-  }
-}
-
-
-void GreedyAllocator::TryAllocateCandidate(
-    const AllocationCandidate& candidate) {
-  if (candidate.is_group()) {
-    TryAllocateGroup(candidate.group());
-  } else {
-    TryAllocateLiveRange(candidate.live_range());
-  }
-}
-
-
-void GreedyAllocator::TryAllocateGroup(LiveRangeGroup* group) {
-  float group_weight = 0.0;
-  for (LiveRange* member : group->ranges()) {
-    EnsureValidRangeWeight(member);
-    group_weight = Max(group_weight, member->weight());
-  }
-
-  float eviction_weight = group_weight;
-  int eviction_reg = -1;
-  int free_reg = -1;
-  for (int i = 0; i < num_allocatable_registers(); ++i) {
-    int reg = allocatable_register_code(i);
-    float weight = GetMaximumConflictingWeight(reg, group, group_weight);
-    if (weight == LiveRange::kInvalidWeight) {
-      free_reg = reg;
-      break;
-    }
-    if (weight < eviction_weight) {
-      eviction_weight = weight;
-      eviction_reg = reg;
-    }
-  }
-  if (eviction_reg < 0 && free_reg < 0) {
-    for (LiveRange* member : group->ranges()) {
-      scheduler().Schedule(member);
-    }
-    return;
-  }
-  if (free_reg < 0) {
-    DCHECK(eviction_reg >= 0);
-    for (LiveRange* member : group->ranges()) {
-      EvictAndRescheduleConflicts(eviction_reg, member);
-    }
-    free_reg = eviction_reg;
-  }
-
-  DCHECK(free_reg >= 0);
-  for (LiveRange* member : group->ranges()) {
-    AssignRangeToRegister(free_reg, member);
-  }
-}
-
-
-void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
-  // TODO(mtrofin): once we introduce groups, we'll want to first try and
-  // allocate at the preferred register.
-  TRACE("Attempting to allocate live range %d:%d.\n", range->TopLevel()->vreg(),
-        range->relative_id());
-  int free_reg = -1;
-  int evictable_reg = -1;
-  int hinted_reg = -1;
-
-  EnsureValidRangeWeight(range);
-  float competing_weight = range->weight();
-  DCHECK(competing_weight != LiveRange::kInvalidWeight);
-
-  // Can we allocate at the hinted register?
-  if (range->FirstHintPosition(&hinted_reg) != nullptr) {
-    DCHECK(hinted_reg >= 0);
-    float max_conflict_weight =
-        GetMaximumConflictingWeight(hinted_reg, range, competing_weight);
-    if (max_conflict_weight == LiveRange::kInvalidWeight) {
-      free_reg = hinted_reg;
-    } else if (max_conflict_weight < range->weight()) {
-      evictable_reg = hinted_reg;
-    }
-  }
-
-  if (free_reg < 0 && evictable_reg < 0) {
-    // There was no hinted reg, or we cannot allocate there.
-    float smallest_weight = LiveRange::kMaxWeight;
-
-    // Seek either the first free register, or, from the set of registers
-    // where the maximum conflict is lower than the candidate's weight, the one
-    // with the smallest such weight.
-    for (int i = 0; i < num_allocatable_registers(); i++) {
-      int reg = allocatable_register_code(i);
-      // Skip unnecessarily re-visiting the hinted register, if any.
-      if (reg == hinted_reg) continue;
-      float max_conflict_weight =
-          GetMaximumConflictingWeight(reg, range, competing_weight);
-      if (max_conflict_weight == LiveRange::kInvalidWeight) {
-        free_reg = reg;
-        break;
-      }
-      if (max_conflict_weight < range->weight() &&
-          max_conflict_weight < smallest_weight) {
-        smallest_weight = max_conflict_weight;
-        evictable_reg = reg;
-      }
-    }
-  }
-
-  // We have a free register, so we use it.
-  if (free_reg >= 0) {
-    TRACE("Found free register %s for live range %d:%d.\n",
-          RegisterName(free_reg), range->TopLevel()->vreg(),
-          range->relative_id());
-    AssignRangeToRegister(free_reg, range);
-    return;
-  }
-
-  // We found a register to perform evictions, so we evict and allocate our
-  // candidate.
-  if (evictable_reg >= 0) {
-    TRACE("Found evictable register %s for live range %d:%d.\n",
-          RegisterName(free_reg), range->TopLevel()->vreg(),
-          range->relative_id());
-    EvictAndRescheduleConflicts(evictable_reg, range);
-    AssignRangeToRegister(evictable_reg, range);
-    return;
-  }
-
-  // The range needs to be split or spilled.
-  SplitOrSpillBlockedRange(range);
-}
-
-
-void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id,
-                                                  const LiveRange* range) {
-  auto conflicts = current_allocations(reg_id)->GetConflicts(range);
-  for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
-       conflict = conflicts.RemoveCurrentAndGetNext()) {
-    DCHECK(conflict->HasRegisterAssigned());
-    CHECK(!conflict->TopLevel()->IsFixed());
-    conflict->UnsetAssignedRegister();
-    UnsetOperands(conflict, data());
-    UpdateWeightAtEviction(conflict);
-    scheduler().Schedule(conflict);
-    TRACE("Evicted range %d%d.\n", conflict->TopLevel()->vreg(),
-          conflict->relative_id());
-  }
-}
-
-
-void GreedyAllocator::AllocateRegisters() {
-  CHECK(scheduler().empty());
-  CHECK(allocations_.empty());
-
-  TRACE("Begin allocating function %s with the Greedy Allocator\n",
-        data()->debug_name());
-
-  SplitAndSpillRangesDefinedByMemoryOperand(true);
-  GroupLiveRanges();
-  ScheduleAllocationCandidates();
-  PreallocateFixedRanges();
-  while (!scheduler().empty()) {
-    AllocationCandidate candidate = scheduler().GetNext();
-    TryAllocateCandidate(candidate);
-  }
-
-  for (size_t i = 0; i < allocations_.size(); ++i) {
-    if (!allocations_[i]->empty()) {
-      data()->MarkAllocated(mode(), static_cast<int>(i));
-    }
-  }
-  allocations_.clear();
-
-  TryReuseSpillRangesForGroups();
-
-  TRACE("End allocating function %s with the Greedy Allocator\n",
-        data()->debug_name());
-}
-
-
-void GreedyAllocator::TryReuseSpillRangesForGroups() {
-  for (TopLevelLiveRange* top : data()->live_ranges()) {
-    if (!CanProcessRange(top) || !top->is_phi() || top->group() == nullptr) {
-      continue;
-    }
-
-    SpillRange* spill_range = nullptr;
-    for (LiveRange* member : top->group()->ranges()) {
-      if (!member->TopLevel()->HasSpillRange()) continue;
-      SpillRange* member_range = member->TopLevel()->GetSpillRange();
-      if (spill_range == nullptr) {
-        spill_range = member_range;
-      } else {
-        // This may not always succeed, because we group non-conflicting ranges
-        // that may have been splintered, and the splinters may cause conflicts
-        // in the spill ranges.
-        // TODO(mtrofin): should the splinters own their own spill ranges?
-        spill_range->TryMerge(member_range);
-      }
-    }
-  }
-}
-
-
-float GreedyAllocator::GetMaximumConflictingWeight(
-    unsigned reg_id, const LiveRange* range, float competing_weight) const {
-  float ret = LiveRange::kInvalidWeight;
-
-  auto conflicts = current_allocations(reg_id)->GetConflicts(range);
-  for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
-       conflict = conflicts.GetNext()) {
-    DCHECK_NE(conflict->weight(), LiveRange::kInvalidWeight);
-    if (competing_weight <= conflict->weight()) return LiveRange::kMaxWeight;
-    ret = Max(ret, conflict->weight());
-    DCHECK(ret < LiveRange::kMaxWeight);
-  }
-
-  return ret;
-}
-
-
-float GreedyAllocator::GetMaximumConflictingWeight(unsigned reg_id,
-                                                   const LiveRangeGroup* group,
-                                                   float group_weight) const {
-  float ret = LiveRange::kInvalidWeight;
-
-  for (LiveRange* member : group->ranges()) {
-    float member_conflict_weight =
-        GetMaximumConflictingWeight(reg_id, member, group_weight);
-    if (member_conflict_weight == LiveRange::kMaxWeight) {
-      return LiveRange::kMaxWeight;
-    }
-    if (member_conflict_weight > group_weight) return LiveRange::kMaxWeight;
-    ret = Max(member_conflict_weight, ret);
-  }
-
-  return ret;
-}
-
-
-void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) {
-  // The live range weight will be invalidated when ranges are created or split.
-  // Otherwise, it is consistently updated when the range is allocated or
-  // unallocated.
-  if (range->weight() != LiveRange::kInvalidWeight) return;
-
-  if (range->TopLevel()->IsFixed()) {
-    range->set_weight(LiveRange::kMaxWeight);
-    return;
-  }
-  if (!IsProgressPossible(range)) {
-    range->set_weight(LiveRange::kMaxWeight);
-    return;
-  }
-
-  float use_count = 0.0;
-  for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) {
-    ++use_count;
-  }
-  range->set_weight(use_count / static_cast<float>(range->GetSize()));
-}
-
-
-void GreedyAllocator::SpillRangeAsLastResort(LiveRange* range) {
-  LifetimePosition start = range->Start();
-  CHECK(range->CanBeSpilled(start));
-
-  DCHECK(range->NextRegisterPosition(start) == nullptr);
-  Spill(range);
-}
-
-
-LiveRange* GreedyAllocator::GetRemainderAfterSplittingAroundFirstCall(
-    LiveRange* range) {
-  LiveRange* ret = range;
-  for (UseInterval* interval = range->first_interval(); interval != nullptr;
-       interval = interval->next()) {
-    LifetimePosition start = interval->start();
-    LifetimePosition end = interval->end();
-    // If the interval starts at instruction end, then the first instruction
-    // in the interval is the next one.
-    int first_full_instruction = (start.IsGapPosition() || start.IsStart())
-                                     ? start.ToInstructionIndex()
-                                     : start.ToInstructionIndex() + 1;
-    // If the interval ends in a gap or at instruction start, then the last
-    // instruction is the previous one.
-    int last_full_instruction = (end.IsGapPosition() || end.IsStart())
-                                    ? end.ToInstructionIndex() - 1
-                                    : end.ToInstructionIndex();
-
-    for (int instruction_index = first_full_instruction;
-         instruction_index <= last_full_instruction; ++instruction_index) {
-      if (!code()->InstructionAt(instruction_index)->IsCall()) continue;
-
-      LifetimePosition before =
-          GetSplitPositionForInstruction(range, instruction_index);
-      LiveRange* second_part =
-          before.IsValid() ? Split(range, data(), before) : range;
-
-      if (range != second_part) scheduler().Schedule(range);
-
-      LifetimePosition after =
-          FindSplitPositionAfterCall(second_part, instruction_index);
-
-      if (after.IsValid()) {
-        ret = Split(second_part, data(), after);
-      } else {
-        ret = nullptr;
-      }
-      Spill(second_part);
-      return ret;
-    }
-  }
-  return ret;
-}
-
-
-bool GreedyAllocator::TrySplitAroundCalls(LiveRange* range) {
-  bool modified = false;
-
-  while (range != nullptr) {
-    LiveRange* remainder = GetRemainderAfterSplittingAroundFirstCall(range);
-    // If we performed no modification, we're done.
-    if (remainder == range) {
-      break;
-    }
-    // We performed a modification.
-    modified = true;
-    range = remainder;
-  }
-  // If we have a remainder and we made modifications, it means the remainder
-  // has no calls and we should schedule it for further processing. If we made
-  // no modifications, we will just return false, because we want the algorithm
-  // to make progress by trying some other heuristic.
-  if (modified && range != nullptr) {
-    DCHECK(!range->spilled());
-    DCHECK(!range->HasRegisterAssigned());
-    scheduler().Schedule(range);
-  }
-  return modified;
-}
-
-
-LifetimePosition GreedyAllocator::FindSplitPositionAfterCall(
-    const LiveRange* range, int call_index) {
-  LifetimePosition after_call =
-      Max(range->Start(),
-          LifetimePosition::GapFromInstructionIndex(call_index + 1));
-  UsePosition* next_use = range->NextRegisterPosition(after_call);
-  if (!next_use) return LifetimePosition::Invalid();
-
-  LifetimePosition split_pos = FindOptimalSplitPos(after_call, next_use->pos());
-  split_pos =
-      GetSplitPositionForInstruction(range, split_pos.ToInstructionIndex());
-  return split_pos;
-}
-
-
-LifetimePosition GreedyAllocator::FindSplitPositionBeforeLoops(
-    LiveRange* range) {
-  LifetimePosition end = range->End();
-  if (end.ToInstructionIndex() >= code()->LastInstructionIndex()) {
-    end =
-        LifetimePosition::GapFromInstructionIndex(end.ToInstructionIndex() - 1);
-  }
-  LifetimePosition pos = FindOptimalSplitPos(range->Start(), end);
-  pos = GetSplitPositionForInstruction(range, pos.ToInstructionIndex());
-  return pos;
-}
-
-
-void GreedyAllocator::SplitOrSpillBlockedRange(LiveRange* range) {
-  if (TrySplitAroundCalls(range)) return;
-
-  LifetimePosition pos = FindSplitPositionBeforeLoops(range);
-
-  if (!pos.IsValid()) pos = GetLastResortSplitPosition(range);
-  if (pos.IsValid()) {
-    LiveRange* tail = Split(range, data(), pos);
-    DCHECK(tail != range);
-    scheduler().Schedule(tail);
-    scheduler().Schedule(range);
-    return;
-  }
-  SpillRangeAsLastResort(range);
-}
-
-
-// Basic heuristic for advancing the algorithm, if any other splitting heuristic
-// failed.
-LifetimePosition GreedyAllocator::GetLastResortSplitPosition(
-    const LiveRange* range) {
-  LifetimePosition previous = range->Start();
-  for (UsePosition *pos = range->NextRegisterPosition(previous); pos != nullptr;
-       previous = previous.NextFullStart(),
-                   pos = range->NextRegisterPosition(previous)) {
-    LifetimePosition optimal = FindOptimalSplitPos(previous, pos->pos());
-    LifetimePosition before =
-        GetSplitPositionForInstruction(range, optimal.ToInstructionIndex());
-    if (before.IsValid()) return before;
-    LifetimePosition after = GetSplitPositionForInstruction(
-        range, pos->pos().ToInstructionIndex() + 1);
-    if (after.IsValid()) return after;
-  }
-  return LifetimePosition::Invalid();
-}
-
-
-bool GreedyAllocator::IsProgressPossible(const LiveRange* range) {
-  return range->CanBeSpilled(range->Start()) ||
-         GetLastResortSplitPosition(range).IsValid();
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/greedy-allocator.h b/src/compiler/greedy-allocator.h
deleted file mode 100644
index b61ba42..0000000
--- a/src/compiler/greedy-allocator.h
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_GREEDY_ALLOCATOR_H_
-#define V8_GREEDY_ALLOCATOR_H_
-
-#include "src/compiler/coalesced-live-ranges.h"
-#include "src/compiler/register-allocator.h"
-#include "src/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-// The object of allocation scheduling. At minimum, this is a LiveRange, but
-// we may extend this to groups of LiveRanges. It has to be comparable.
-class AllocationCandidate {
- public:
-  explicit AllocationCandidate(LiveRange* range)
-      : is_group_(false), size_(range->GetSize()) {
-    candidate_.range_ = range;
-  }
-
-  explicit AllocationCandidate(LiveRangeGroup* ranges)
-      : is_group_(true), size_(CalculateGroupSize(ranges)) {
-    candidate_.group_ = ranges;
-  }
-
-  // Strict ordering operators
-  bool operator<(const AllocationCandidate& other) const {
-    return size() < other.size();
-  }
-
-  bool operator>(const AllocationCandidate& other) const {
-    return size() > other.size();
-  }
-
-  bool is_group() const { return is_group_; }
-  LiveRange* live_range() const { return candidate_.range_; }
-  LiveRangeGroup* group() const { return candidate_.group_; }
-
- private:
-  unsigned CalculateGroupSize(LiveRangeGroup* group) {
-    unsigned ret = 0;
-    for (LiveRange* range : group->ranges()) {
-      ret += range->GetSize();
-    }
-    return ret;
-  }
-
-  unsigned size() const { return size_; }
-  bool is_group_;
-  unsigned size_;
-  union {
-    LiveRange* range_;
-    LiveRangeGroup* group_;
-  } candidate_;
-};
-
-
-// Schedule processing (allocating) of AllocationCandidates.
-class AllocationScheduler final : ZoneObject {
- public:
-  explicit AllocationScheduler(Zone* zone) : queue_(zone) {}
-  void Schedule(LiveRange* range);
-  void Schedule(LiveRangeGroup* group);
-  AllocationCandidate GetNext();
-  bool empty() const { return queue_.empty(); }
-
- private:
-  typedef ZonePriorityQueue<AllocationCandidate> ScheduleQueue;
-  ScheduleQueue queue_;
-
-  DISALLOW_COPY_AND_ASSIGN(AllocationScheduler);
-};
-
-
-// A variant of the LLVM Greedy Register Allocator. See
-// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
-class GreedyAllocator final : public RegisterAllocator {
- public:
-  explicit GreedyAllocator(RegisterAllocationData* data, RegisterKind kind,
-                           Zone* local_zone);
-
-  void AllocateRegisters();
-
- private:
-  static const float kAllocatedRangeMultiplier;
-
-  static void UpdateWeightAtAllocation(LiveRange* range) {
-    DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
-    range->set_weight(range->weight() * kAllocatedRangeMultiplier);
-  }
-
-
-  static void UpdateWeightAtEviction(LiveRange* range) {
-    DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
-    range->set_weight(range->weight() / kAllocatedRangeMultiplier);
-  }
-
-  AllocationScheduler& scheduler() { return scheduler_; }
-  CoalescedLiveRanges* current_allocations(unsigned i) {
-    return allocations_[i];
-  }
-
-  CoalescedLiveRanges* current_allocations(unsigned i) const {
-    return allocations_[i];
-  }
-
-  Zone* local_zone() const { return local_zone_; }
-  ZoneVector<LiveRangeGroup*>& groups() { return groups_; }
-  const ZoneVector<LiveRangeGroup*>& groups() const { return groups_; }
-
-  // Insert fixed ranges.
-  void PreallocateFixedRanges();
-
-  void GroupLiveRanges();
-
-  // Schedule unassigned live ranges for allocation.
-  void ScheduleAllocationCandidates();
-
-  void AllocateRegisterToRange(unsigned reg_id, LiveRange* range) {
-    UpdateWeightAtAllocation(range);
-    current_allocations(reg_id)->AllocateRange(range);
-  }
-  // Evict and reschedule conflicts of a given range, at a given register.
-  void EvictAndRescheduleConflicts(unsigned reg_id, const LiveRange* range);
-
-  void TryAllocateCandidate(const AllocationCandidate& candidate);
-  void TryAllocateLiveRange(LiveRange* range);
-  void TryAllocateGroup(LiveRangeGroup* group);
-
-  // Calculate the weight of a candidate for allocation.
-  void EnsureValidRangeWeight(LiveRange* range);
-
-  // Calculate the new weight of a range that is about to be allocated.
-  float GetAllocatedRangeWeight(float candidate_weight);
-
-  // Returns kInvalidWeight if there are no conflicts, or the largest weight of
-  // a range conflicting with the given range, at the given register.
-  float GetMaximumConflictingWeight(unsigned reg_id, const LiveRange* range,
-                                    float competing_weight) const;
-
-  // Returns kInvalidWeight if there are no conflicts, or the largest weight of
-  // a range conflicting with the given range, at the given register.
-  float GetMaximumConflictingWeight(unsigned reg_id,
-                                    const LiveRangeGroup* group,
-                                    float group_weight) const;
-
-  // This is the extension point for splitting heuristics.
-  void SplitOrSpillBlockedRange(LiveRange* range);
-
-  // Find a good position where to fill, after a range was spilled after a call.
-  LifetimePosition FindSplitPositionAfterCall(const LiveRange* range,
-                                              int call_index);
-  // Split a range around all calls it passes over. Returns true if any changes
-  // were made, or false if no calls were found.
-  bool TrySplitAroundCalls(LiveRange* range);
-
-  // Find a split position at the outmost loop.
-  LifetimePosition FindSplitPositionBeforeLoops(LiveRange* range);
-
-  // Finds the first call instruction in the path of this range. Splits before
-  // and requeues that segment (if any), spills the section over the call, and
-  // returns the section after the call. The return is:
-  // - same range, if no call was found
-  // - nullptr, if the range finished at the call and there's no "after the
-  //   call" portion.
-  // - the portion after the call.
-  LiveRange* GetRemainderAfterSplittingAroundFirstCall(LiveRange* range);
-
-  // While we attempt to merge spill ranges later on in the allocation pipeline,
-  // we want to ensure group elements get merged. Waiting until later may hinder
-  // merge-ability, since the pipeline merger (being naive) may create conflicts
-  // between spill ranges of group members.
-  void TryReuseSpillRangesForGroups();
-
-  LifetimePosition GetLastResortSplitPosition(const LiveRange* range);
-
-  bool IsProgressPossible(const LiveRange* range);
-
-  // Necessary heuristic: spill when all else failed.
-  void SpillRangeAsLastResort(LiveRange* range);
-
-  void AssignRangeToRegister(int reg_id, LiveRange* range);
-
-  Zone* local_zone_;
-  ZoneVector<CoalescedLiveRanges*> allocations_;
-  AllocationScheduler scheduler_;
-  ZoneVector<LiveRangeGroup*> groups_;
-
-  DISALLOW_COPY_AND_ASSIGN(GreedyAllocator);
-};
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
-#endif  // V8_GREEDY_ALLOCATOR_H_
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index a9083e1..6df22f6 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -67,6 +67,7 @@
     Constant constant = ToConstant(operand);
     if (constant.type() == Constant::kInt32 &&
         (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+         constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
          constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE)) {
       return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
                        constant.rmode());
@@ -119,8 +120,8 @@
       }
       case kMode_MRI: {
         Register base = InputRegister(NextOffset(offset));
-        int32_t disp = InputInt32(NextOffset(offset));
-        return Operand(base, disp);
+        Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+        return Operand(base, ctant.ToInt32(), ctant.rmode());
       }
       case kMode_MR1:
       case kMode_MR2:
@@ -139,8 +140,8 @@
         Register base = InputRegister(NextOffset(offset));
         Register index = InputRegister(NextOffset(offset));
         ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
-        int32_t disp = InputInt32(NextOffset(offset));
-        return Operand(base, index, scale, disp);
+        Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+        return Operand(base, index, scale, ctant.ToInt32(), ctant.rmode());
       }
       case kMode_M1:
       case kMode_M2:
@@ -157,12 +158,12 @@
       case kMode_M8I: {
         Register index = InputRegister(NextOffset(offset));
         ScaleFactor scale = ScaleFor(kMode_M1I, mode);
-        int32_t disp = InputInt32(NextOffset(offset));
-        return Operand(index, scale, disp);
+        Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+        return Operand(index, scale, ctant.ToInt32(), ctant.rmode());
       }
       case kMode_MI: {
-        int32_t disp = InputInt32(NextOffset(offset));
-        return Operand(Immediate(disp));
+        Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+        return Operand(ctant.ToInt32(), ctant.rmode());
       }
       case kMode_None:
         UNREACHABLE();
@@ -363,6 +364,37 @@
     }                                                                 \
   } while (0)
 
+#define ASSEMBLE_IEEE754_BINOP(name)                                          \
+  do {                                                                        \
+    /* Pass two doubles as arguments on the stack. */                         \
+    __ PrepareCallCFunction(4, eax);                                          \
+    __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0));        \
+    __ movsd(Operand(esp, 1 * kDoubleSize), i.InputDoubleRegister(1));        \
+    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+                     4);                                                      \
+    /* Return value is in st(0) on ia32. */                                   \
+    /* Store it into the result register. */                                  \
+    __ sub(esp, Immediate(kDoubleSize));                                      \
+    __ fstp_d(Operand(esp, 0));                                               \
+    __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));                      \
+    __ add(esp, Immediate(kDoubleSize));                                      \
+  } while (false)
+
+#define ASSEMBLE_IEEE754_UNOP(name)                                           \
+  do {                                                                        \
+    /* Pass one double as argument on the stack. */                           \
+    __ PrepareCallCFunction(2, eax);                                          \
+    __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0));        \
+    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+                     2);                                                      \
+    /* Return value is in st(0) on ia32. */                                   \
+    /* Store it into the result register. */                                  \
+    __ sub(esp, Immediate(kDoubleSize));                                      \
+    __ fstp_d(Operand(esp, 0));                                               \
+    __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));                      \
+    __ add(esp, Immediate(kDoubleSize));                                      \
+  } while (false)
+
 void CodeGenerator::AssembleDeconstructFrame() {
   __ mov(esp, ebp);
   __ pop(ebp);
@@ -539,6 +571,14 @@
     case kArchTableSwitch:
       AssembleArchTableSwitch(instr);
       break;
+    case kArchComment: {
+      Address comment_string = i.InputExternalReference(0).address();
+      __ RecordComment(reinterpret_cast<const char*>(comment_string));
+      break;
+    }
+    case kArchDebugBreak:
+      __ int3();
+      break;
     case kArchNop:
     case kArchThrowTerminator:
       // don't emit code for nops.
@@ -609,6 +649,45 @@
       __ lea(i.OutputRegister(), Operand(base, offset.offset()));
       break;
     }
+    case kIeee754Float64Atan:
+      ASSEMBLE_IEEE754_UNOP(atan);
+      break;
+    case kIeee754Float64Atan2:
+      ASSEMBLE_IEEE754_BINOP(atan2);
+      break;
+    case kIeee754Float64Cbrt:
+      ASSEMBLE_IEEE754_UNOP(cbrt);
+      break;
+    case kIeee754Float64Cos:
+      ASSEMBLE_IEEE754_UNOP(cos);
+      break;
+    case kIeee754Float64Expm1:
+      ASSEMBLE_IEEE754_UNOP(expm1);
+      break;
+    case kIeee754Float64Exp:
+      ASSEMBLE_IEEE754_UNOP(exp);
+      break;
+    case kIeee754Float64Atanh:
+      ASSEMBLE_IEEE754_UNOP(atanh);
+      break;
+    case kIeee754Float64Log:
+      ASSEMBLE_IEEE754_UNOP(log);
+      break;
+    case kIeee754Float64Log1p:
+      ASSEMBLE_IEEE754_UNOP(log1p);
+      break;
+    case kIeee754Float64Log2:
+      ASSEMBLE_IEEE754_UNOP(log2);
+      break;
+    case kIeee754Float64Log10:
+      ASSEMBLE_IEEE754_UNOP(log10);
+      break;
+    case kIeee754Float64Sin:
+      ASSEMBLE_IEEE754_UNOP(sin);
+      break;
+    case kIeee754Float64Tan:
+      ASSEMBLE_IEEE754_UNOP(tan);
+      break;
     case kIA32Add:
       if (HasImmediateInput(instr, 1)) {
         __ add(i.InputOperand(0), i.InputImmediate(1));
@@ -1113,6 +1192,10 @@
       __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
       break;
     }
+    case kSSEFloat64SilenceNaN:
+      __ xorpd(kScratchDoubleReg, kScratchDoubleReg);
+      __ subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
+      break;
     case kIA32Movsxbl:
       __ movsx_b(i.OutputRegister(), i.MemoryOperand());
       break;
@@ -1227,9 +1310,9 @@
     }
     case kIA32PushFloat32:
       if (instr->InputAt(0)->IsFPRegister()) {
-        __ sub(esp, Immediate(kDoubleSize));
+        __ sub(esp, Immediate(kFloatSize));
         __ movss(Operand(esp, 0), i.InputDoubleRegister(0));
-        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+        frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
       } else if (HasImmediateInput(instr, 0)) {
         __ Move(kScratchDoubleReg, i.InputDouble(0));
         __ sub(esp, Immediate(kDoubleSize));
@@ -1261,9 +1344,9 @@
       break;
     case kIA32Push:
       if (instr->InputAt(0)->IsFPRegister()) {
-        __ sub(esp, Immediate(kDoubleSize));
+        __ sub(esp, Immediate(kFloatSize));
         __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
-        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+        frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
       } else if (HasImmediateInput(instr, 0)) {
         __ push(i.InputImmediate(0));
         frame_access_state()->IncreaseSPDelta(1);
diff --git a/src/compiler/ia32/instruction-codes-ia32.h b/src/compiler/ia32/instruction-codes-ia32.h
index 79dd05e..09d4615 100644
--- a/src/compiler/ia32/instruction-codes-ia32.h
+++ b/src/compiler/ia32/instruction-codes-ia32.h
@@ -81,6 +81,7 @@
   V(SSEFloat64InsertLowWord32)     \
   V(SSEFloat64InsertHighWord32)    \
   V(SSEFloat64LoadLowWord32)       \
+  V(SSEFloat64SilenceNaN)          \
   V(AVXFloat32Add)                 \
   V(AVXFloat32Sub)                 \
   V(AVXFloat32Mul)                 \
diff --git a/src/compiler/ia32/instruction-scheduler-ia32.cc b/src/compiler/ia32/instruction-scheduler-ia32.cc
index f341db4..f19c328 100644
--- a/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -84,6 +84,7 @@
     case kSSEFloat64InsertLowWord32:
     case kSSEFloat64InsertHighWord32:
     case kSSEFloat64LoadLowWord32:
+    case kSSEFloat64SilenceNaN:
     case kAVXFloat32Add:
     case kAVXFloat32Sub:
     case kAVXFloat32Mul:
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index 9002d75..3ffdd30 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -1014,7 +1014,6 @@
   VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
 }
 
-
 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
   VisitRO(this, node, kSSEFloat32Sqrt);
 }
@@ -1069,6 +1068,24 @@
   VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
 }
 
+void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+                                                   InstructionCode opcode) {
+  IA32OperandGenerator g(this);
+  Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+       g.UseRegister(node->InputAt(1)))
+      ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+                                                  InstructionCode opcode) {
+  IA32OperandGenerator g(this);
+  Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)))
+      ->MarkAsCall();
+}
 
 void InstructionSelector::EmitPrepareArguments(
     ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1103,7 +1120,7 @@
           g.CanBeImmediate(input.node())
               ? g.UseImmediate(input.node())
               : IsSupported(ATOM) ||
-                        sequence()->IsFloat(GetVirtualRegister(input.node()))
+                        sequence()->IsFP(GetVirtualRegister(input.node()))
                     ? g.UseRegister(input.node())
                     : g.Use(input.node());
       if (input.type() == MachineType::Float32()) {
@@ -1583,6 +1600,12 @@
        g.UseRegister(left), g.Use(right));
 }
 
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
 void InstructionSelector::VisitAtomicLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
@@ -1656,6 +1679,13 @@
   return flags;
 }
 
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+  return MachineOperatorBuilder::AlignmentRequirements::
+      FullUnalignedAccessSupport();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h
index 57868c6..0b3132f 100644
--- a/src/compiler/instruction-codes.h
+++ b/src/compiler/instruction-codes.h
@@ -56,6 +56,8 @@
   V(ArchLookupSwitch)                     \
   V(ArchTableSwitch)                      \
   V(ArchNop)                              \
+  V(ArchDebugBreak)                       \
+  V(ArchComment)                          \
   V(ArchThrowTerminator)                  \
   V(ArchDeoptimize)                       \
   V(ArchRet)                              \
@@ -86,7 +88,20 @@
   V(AtomicLoadWord32)                     \
   V(AtomicStoreWord8)                     \
   V(AtomicStoreWord16)                    \
-  V(AtomicStoreWord32)
+  V(AtomicStoreWord32)                    \
+  V(Ieee754Float64Atan)                   \
+  V(Ieee754Float64Atan2)                  \
+  V(Ieee754Float64Atanh)                  \
+  V(Ieee754Float64Cbrt)                   \
+  V(Ieee754Float64Cos)                    \
+  V(Ieee754Float64Exp)                    \
+  V(Ieee754Float64Expm1)                  \
+  V(Ieee754Float64Log)                    \
+  V(Ieee754Float64Log1p)                  \
+  V(Ieee754Float64Log10)                  \
+  V(Ieee754Float64Log2)                   \
+  V(Ieee754Float64Sin)                    \
+  V(Ieee754Float64Tan)
 
 #define ARCH_OPCODE_LIST(V)  \
   COMMON_ARCH_OPCODE_LIST(V) \
diff --git a/src/compiler/instruction-scheduler.cc b/src/compiler/instruction-scheduler.cc
index b3e4bbc..3ef7c08 100644
--- a/src/compiler/instruction-scheduler.cc
+++ b/src/compiler/instruction-scheduler.cc
@@ -222,6 +222,21 @@
     case kArchParentFramePointer:
     case kArchTruncateDoubleToI:
     case kArchStackSlot:
+    case kArchDebugBreak:
+    case kArchComment:
+    case kIeee754Float64Atan:
+    case kIeee754Float64Atan2:
+    case kIeee754Float64Atanh:
+    case kIeee754Float64Cbrt:
+    case kIeee754Float64Cos:
+    case kIeee754Float64Exp:
+    case kIeee754Float64Expm1:
+    case kIeee754Float64Log:
+    case kIeee754Float64Log1p:
+    case kIeee754Float64Log10:
+    case kIeee754Float64Log2:
+    case kIeee754Float64Sin:
+    case kIeee754Float64Tan:
       return kNoOpcodeFlags;
 
     case kArchStackPointer:
diff --git a/src/compiler/instruction-scheduler.h b/src/compiler/instruction-scheduler.h
index 23950f7..4f5b0f7 100644
--- a/src/compiler/instruction-scheduler.h
+++ b/src/compiler/instruction-scheduler.h
@@ -177,12 +177,12 @@
   // Identify nops used as a definition point for live-in registers at
   // function entry.
   bool IsFixedRegisterParameter(const Instruction* instr) const {
-    return (instr->arch_opcode() == kArchNop) &&
-      (instr->OutputCount() == 1) &&
-      (instr->OutputAt(0)->IsUnallocated()) &&
-      (UnallocatedOperand::cast(instr->OutputAt(0))->HasFixedRegisterPolicy() ||
-       UnallocatedOperand::cast(
-           instr->OutputAt(0))->HasFixedDoubleRegisterPolicy());
+    return (instr->arch_opcode() == kArchNop) && (instr->OutputCount() == 1) &&
+           (instr->OutputAt(0)->IsUnallocated()) &&
+           (UnallocatedOperand::cast(instr->OutputAt(0))
+                ->HasFixedRegisterPolicy() ||
+            UnallocatedOperand::cast(instr->OutputAt(0))
+                ->HasFixedFPRegisterPolicy());
   }
 
   void ComputeTotalLatencies();
diff --git a/src/compiler/instruction-selector-impl.h b/src/compiler/instruction-selector-impl.h
index 301612c..be24e2d 100644
--- a/src/compiler/instruction-selector-impl.h
+++ b/src/compiler/instruction-selector-impl.h
@@ -54,9 +54,10 @@
                                            reg.code(), GetVReg(node)));
   }
 
-  InstructionOperand DefineAsFixed(Node* node, DoubleRegister reg) {
+  template <typename FPRegType>
+  InstructionOperand DefineAsFixed(Node* node, FPRegType reg) {
     return Define(node,
-                  UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
+                  UnallocatedOperand(UnallocatedOperand::FIXED_FP_REGISTER,
                                      reg.code(), GetVReg(node)));
   }
 
@@ -122,10 +123,10 @@
                                         reg.code(), GetVReg(node)));
   }
 
-  InstructionOperand UseFixed(Node* node, DoubleRegister reg) {
-    return Use(node,
-               UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
-                                  reg.code(), GetVReg(node)));
+  template <typename FPRegType>
+  InstructionOperand UseFixed(Node* node, FPRegType reg) {
+    return Use(node, UnallocatedOperand(UnallocatedOperand::FIXED_FP_REGISTER,
+                                        reg.code(), GetVReg(node)));
   }
 
   InstructionOperand UseExplicit(LinkageLocation location) {
@@ -218,6 +219,7 @@
       case IrOpcode::kNumberConstant:
         return Constant(OpParameter<double>(node));
       case IrOpcode::kExternalConstant:
+      case IrOpcode::kComment:
         return Constant(OpParameter<ExternalReference>(node));
       case IrOpcode::kHeapConstant:
         return Constant(OpParameter<Handle<HeapObject>>(node));
@@ -274,7 +276,7 @@
     }
     // a fixed register.
     if (IsFloatingPoint(rep)) {
-      return UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
+      return UnallocatedOperand(UnallocatedOperand::FIXED_FP_REGISTER,
                                 location.AsRegister(), virtual_register);
     }
     return UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index ea68c78..558aff3 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -906,6 +906,12 @@
     case IrOpcode::kStateValues:
     case IrOpcode::kObjectState:
       return;
+    case IrOpcode::kDebugBreak:
+      VisitDebugBreak(node);
+      return;
+    case IrOpcode::kComment:
+      VisitComment(node);
+      return;
     case IrOpcode::kLoad: {
       LoadRepresentation type = LoadRepresentationOf(node->op());
       MarkAsRepresentation(type.representation(), node);
@@ -1029,6 +1035,13 @@
       return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
     case IrOpcode::kChangeFloat64ToUint32:
       return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
+    case IrOpcode::kFloat64SilenceNaN:
+      MarkAsFloat64(node);
+      if (CanProduceSignalingNaN(node->InputAt(0))) {
+        return VisitFloat64SilenceNaN(node);
+      } else {
+        return EmitIdentity(node);
+      }
     case IrOpcode::kTruncateFloat64ToUint32:
       return MarkAsWord32(node), VisitTruncateFloat64ToUint32(node);
     case IrOpcode::kTruncateFloat32ToInt32:
@@ -1081,6 +1094,8 @@
       return MarkAsFloat32(node), VisitFloat32Sub(node);
     case IrOpcode::kFloat32SubPreserveNan:
       return MarkAsFloat32(node), VisitFloat32SubPreserveNan(node);
+    case IrOpcode::kFloat32Neg:
+      return MarkAsFloat32(node), VisitFloat32Neg(node);
     case IrOpcode::kFloat32Mul:
       return MarkAsFloat32(node), VisitFloat32Mul(node);
     case IrOpcode::kFloat32Div:
@@ -1105,6 +1120,8 @@
       return MarkAsFloat64(node), VisitFloat64Sub(node);
     case IrOpcode::kFloat64SubPreserveNan:
       return MarkAsFloat64(node), VisitFloat64SubPreserveNan(node);
+    case IrOpcode::kFloat64Neg:
+      return MarkAsFloat64(node), VisitFloat64Neg(node);
     case IrOpcode::kFloat64Mul:
       return MarkAsFloat64(node), VisitFloat64Mul(node);
     case IrOpcode::kFloat64Div:
@@ -1117,8 +1134,34 @@
       return MarkAsFloat64(node), VisitFloat64Max(node);
     case IrOpcode::kFloat64Abs:
       return MarkAsFloat64(node), VisitFloat64Abs(node);
+    case IrOpcode::kFloat64Atan:
+      return MarkAsFloat64(node), VisitFloat64Atan(node);
+    case IrOpcode::kFloat64Atan2:
+      return MarkAsFloat64(node), VisitFloat64Atan2(node);
+    case IrOpcode::kFloat64Atanh:
+      return MarkAsFloat64(node), VisitFloat64Atanh(node);
+    case IrOpcode::kFloat64Cbrt:
+      return MarkAsFloat64(node), VisitFloat64Cbrt(node);
+    case IrOpcode::kFloat64Cos:
+      return MarkAsFloat64(node), VisitFloat64Cos(node);
+    case IrOpcode::kFloat64Exp:
+      return MarkAsFloat64(node), VisitFloat64Exp(node);
+    case IrOpcode::kFloat64Expm1:
+      return MarkAsFloat64(node), VisitFloat64Expm1(node);
+    case IrOpcode::kFloat64Log:
+      return MarkAsFloat64(node), VisitFloat64Log(node);
+    case IrOpcode::kFloat64Log1p:
+      return MarkAsFloat64(node), VisitFloat64Log1p(node);
+    case IrOpcode::kFloat64Log10:
+      return MarkAsFloat64(node), VisitFloat64Log10(node);
+    case IrOpcode::kFloat64Log2:
+      return MarkAsFloat64(node), VisitFloat64Log2(node);
+    case IrOpcode::kFloat64Sin:
+      return MarkAsFloat64(node), VisitFloat64Sin(node);
     case IrOpcode::kFloat64Sqrt:
       return MarkAsFloat64(node), VisitFloat64Sqrt(node);
+    case IrOpcode::kFloat64Tan:
+      return MarkAsFloat64(node), VisitFloat64Tan(node);
     case IrOpcode::kFloat64Equal:
       return VisitFloat64Equal(node);
     case IrOpcode::kFloat64LessThan:
@@ -1222,6 +1265,58 @@
   Emit(kArchParentFramePointer, g.DefineAsRegister(node));
 }
 
+void InstructionSelector::VisitFloat64Atan(Node* node) {
+  VisitFloat64Ieee754Unop(node, kIeee754Float64Atan);
+}
+
+void InstructionSelector::VisitFloat64Atan2(Node* node) {
+  VisitFloat64Ieee754Binop(node, kIeee754Float64Atan2);
+}
+
+void InstructionSelector::VisitFloat64Atanh(Node* node) {
+  VisitFloat64Ieee754Unop(node, kIeee754Float64Atanh);
+}
+
+void InstructionSelector::VisitFloat64Cbrt(Node* node) {
+  VisitFloat64Ieee754Unop(node, kIeee754Float64Cbrt);
+}
+
+void InstructionSelector::VisitFloat64Cos(Node* node) {
+  VisitFloat64Ieee754Unop(node, kIeee754Float64Cos);
+}
+
+void InstructionSelector::VisitFloat64Exp(Node* node) {
+  VisitFloat64Ieee754Unop(node, kIeee754Float64Exp);
+}
+
+void InstructionSelector::VisitFloat64Expm1(Node* node) {
+  VisitFloat64Ieee754Unop(node, kIeee754Float64Expm1);
+}
+
+void InstructionSelector::VisitFloat64Log(Node* node) {
+  VisitFloat64Ieee754Unop(node, kIeee754Float64Log);
+}
+
+void InstructionSelector::VisitFloat64Log1p(Node* node) {
+  VisitFloat64Ieee754Unop(node, kIeee754Float64Log1p);
+}
+
+void InstructionSelector::VisitFloat64Log2(Node* node) {
+  VisitFloat64Ieee754Unop(node, kIeee754Float64Log2);
+}
+
+void InstructionSelector::VisitFloat64Log10(Node* node) {
+  VisitFloat64Ieee754Unop(node, kIeee754Float64Log10);
+}
+
+void InstructionSelector::VisitFloat64Sin(Node* node) {
+  VisitFloat64Ieee754Unop(node, kIeee754Float64Sin);
+}
+
+void InstructionSelector::VisitFloat64Tan(Node* node) {
+  VisitFloat64Ieee754Unop(node, kIeee754Float64Tan);
+}
+
 void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
                                           InstructionOperand& index_operand) {
   OperandGenerator g(this);
@@ -1267,9 +1362,7 @@
 }
 
 void InstructionSelector::VisitBitcastWordToTagged(Node* node) {
-  OperandGenerator g(this);
-  Node* value = node->InputAt(0);
-  Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+  EmitIdentity(node);
 }
 
 // 32 bit targets do not implement the following instructions.
@@ -1441,12 +1534,7 @@
 void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
 #endif  // V8_TARGET_ARCH_64_BIT
 
-void InstructionSelector::VisitFinishRegion(Node* node) {
-  OperandGenerator g(this);
-  Node* value = node->InputAt(0);
-  Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
-}
-
+void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
 
 void InstructionSelector::VisitParameter(Node* node) {
   OperandGenerator g(this);
@@ -1772,6 +1860,12 @@
               nullptr);
 }
 
+void InstructionSelector::EmitIdentity(Node* node) {
+  OperandGenerator g(this);
+  Node* value = node->InputAt(0);
+  Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+}
+
 void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, Node* value) {
   InstructionCode opcode = kArchDeoptimize;
   switch (kind) {
@@ -1791,6 +1885,26 @@
   Emit(kArchThrowTerminator, g.NoOutput());
 }
 
+void InstructionSelector::VisitDebugBreak(Node* node) {
+  OperandGenerator g(this);
+  Emit(kArchDebugBreak, g.NoOutput());
+}
+
+void InstructionSelector::VisitComment(Node* node) {
+  OperandGenerator g(this);
+  InstructionOperand operand(g.UseImmediate(node));
+  Emit(kArchComment, 0, nullptr, 1, &operand);
+}
+
+bool InstructionSelector::CanProduceSignalingNaN(Node* node) {
+  // TODO(jarin) Improve the heuristic here.
+  if (node->opcode() == IrOpcode::kFloat64Add ||
+      node->opcode() == IrOpcode::kFloat64Sub ||
+      node->opcode() == IrOpcode::kFloat64Mul) {
+    return false;
+  }
+  return true;
+}
 
 FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
     Node* state) {
diff --git a/src/compiler/instruction-selector.h b/src/compiler/instruction-selector.h
index 335099f..8ac8e7b 100644
--- a/src/compiler/instruction-selector.h
+++ b/src/compiler/instruction-selector.h
@@ -139,6 +139,8 @@
   // TODO(sigurds) This should take a CpuFeatures argument.
   static MachineOperatorBuilder::Flags SupportedMachineOperatorFlags();
 
+  static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();
+
   // ===========================================================================
   // ============ Architecture-independent graph covering methods. =============
   // ===========================================================================
@@ -242,6 +244,10 @@
   // Visit the node and generate code, if any.
   void VisitNode(Node* node);
 
+  // Visit the node and generate code for IEEE 754 functions.
+  void VisitFloat64Ieee754Binop(Node*, InstructionCode code);
+  void VisitFloat64Ieee754Unop(Node*, InstructionCode code);
+
 #define DECLARE_GENERATOR(x) void Visit##x(Node* node);
   MACHINE_OP_LIST(DECLARE_GENERATOR)
 #undef DECLARE_GENERATOR
@@ -267,6 +273,9 @@
   void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
                             const CallDescriptor* descriptor, Node* node);
 
+  void EmitIdentity(Node* node);
+  bool CanProduceSignalingNaN(Node* node);
+
   // ===========================================================================
 
   Schedule* schedule() const { return schedule_; }
diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc
index 26aebca..1ef42d6 100644
--- a/src/compiler/instruction.cc
+++ b/src/compiler/instruction.cc
@@ -12,6 +12,7 @@
 namespace internal {
 namespace compiler {
 
+const auto GetRegConfig = RegisterConfiguration::Turbofan;
 
 FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
   switch (condition) {
@@ -59,6 +60,16 @@
   return condition;
 }
 
+bool InstructionOperand::InterferesWith(const InstructionOperand& that) const {
+  if (!IsFPRegister() || !that.IsFPRegister() || kSimpleFPAliasing)
+    return EqualsCanonicalized(that);
+  // Both operands are fp registers and aliasing is non-simple.
+  const LocationOperand& loc1 = *LocationOperand::cast(this);
+  const LocationOperand& loc2 = LocationOperand::cast(that);
+  return GetRegConfig()->AreAliases(loc1.representation(), loc1.register_code(),
+                                    loc2.representation(),
+                                    loc2.register_code());
+}
 
 void InstructionOperand::Print(const RegisterConfiguration* config) const {
   OFStream os(stdout);
@@ -68,13 +79,7 @@
   os << wrapper << std::endl;
 }
 
-
-void InstructionOperand::Print() const {
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
-  Print(config);
-}
-
+void InstructionOperand::Print() const { Print(GetRegConfig()); }
 
 std::ostream& operator<<(std::ostream& os,
                          const PrintableInstructionOperand& printable) {
@@ -95,7 +100,7 @@
                     << conf->GetGeneralRegisterName(
                            unalloc->fixed_register_index())
                     << ")";
-        case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
+        case UnallocatedOperand::FIXED_FP_REGISTER:
           return os << "(="
                     << conf->GetDoubleRegisterName(
                            unalloc->fixed_register_index())
@@ -126,14 +131,21 @@
     case InstructionOperand::ALLOCATED: {
       LocationOperand allocated = LocationOperand::cast(op);
       if (op.IsStackSlot()) {
-        os << "[stack:" << LocationOperand::cast(op).index();
+        os << "[stack:" << allocated.index();
       } else if (op.IsFPStackSlot()) {
-        os << "[fp_stack:" << LocationOperand::cast(op).index();
+        os << "[fp_stack:" << allocated.index();
       } else if (op.IsRegister()) {
-        os << "[" << LocationOperand::cast(op).GetRegister().ToString() << "|R";
+        os << "["
+           << GetRegConfig()->GetGeneralRegisterName(allocated.register_code())
+           << "|R";
+      } else if (op.IsDoubleRegister()) {
+        os << "["
+           << GetRegConfig()->GetDoubleRegisterName(allocated.register_code())
+           << "|R";
       } else {
-        DCHECK(op.IsFPRegister());
-        os << "[" << LocationOperand::cast(op).GetDoubleRegister().ToString()
+        DCHECK(op.IsFloatRegister());
+        os << "["
+           << GetRegConfig()->GetFloatRegisterName(allocated.register_code())
            << "|R";
       }
       if (allocated.IsExplicit()) {
@@ -180,7 +192,6 @@
   return os;
 }
 
-
 void MoveOperands::Print(const RegisterConfiguration* config) const {
   OFStream os(stdout);
   PrintableInstructionOperand wrapper;
@@ -191,13 +202,7 @@
   os << wrapper << std::endl;
 }
 
-
-void MoveOperands::Print() const {
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
-  Print(config);
-}
-
+void MoveOperands::Print() const { Print(GetRegConfig()); }
 
 std::ostream& operator<<(std::ostream& os,
                          const PrintableMoveOperands& printable) {
@@ -246,9 +251,11 @@
                                  int index)
     : LocationOperand(EXPLICIT, kind, rep, index) {
   DCHECK_IMPLIES(kind == REGISTER && !IsFloatingPoint(rep),
-                 Register::from_code(index).IsAllocatable());
-  DCHECK_IMPLIES(kind == REGISTER && IsFloatingPoint(rep),
-                 DoubleRegister::from_code(index).IsAllocatable());
+                 GetRegConfig()->IsAllocatableGeneralCode(index));
+  DCHECK_IMPLIES(kind == REGISTER && rep == MachineRepresentation::kFloat32,
+                 GetRegConfig()->IsAllocatableFloatCode(index));
+  DCHECK_IMPLIES(kind == REGISTER && (rep == MachineRepresentation::kFloat64),
+                 GetRegConfig()->IsAllocatableDoubleCode(index));
 }
 
 Instruction::Instruction(InstructionCode opcode)
@@ -309,13 +316,7 @@
   os << wrapper << std::endl;
 }
 
-
-void Instruction::Print() const {
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
-  Print(config);
-}
-
+void Instruction::Print() const { Print(GetRegConfig()); }
 
 std::ostream& operator<<(std::ostream& os,
                          const PrintableParallelMove& printable) {
@@ -343,9 +344,7 @@
 std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm) {
   os << "{";
   bool first = true;
-  PrintableInstructionOperand poi = {
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
-      InstructionOperand()};
+  PrintableInstructionOperand poi = {GetRegConfig(), InstructionOperand()};
   for (const InstructionOperand& op : pm.reference_operands_) {
     if (!first) {
       os << ";";
@@ -880,12 +879,7 @@
   os << wrapper << std::endl;
 }
 
-
-void InstructionSequence::Print() const {
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
-  Print(config);
-}
+void InstructionSequence::Print() const { Print(GetRegConfig()); }
 
 void InstructionSequence::PrintBlock(const RegisterConfiguration* config,
                                      int block_id) const {
@@ -939,9 +933,7 @@
 }
 
 void InstructionSequence::PrintBlock(int block_id) const {
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
-  PrintBlock(config, block_id);
+  PrintBlock(GetRegConfig(), block_id);
 }
 
 FrameStateDescriptor::FrameStateDescriptor(
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
index 851ba24..7130c3d 100644
--- a/src/compiler/instruction.h
+++ b/src/compiler/instruction.h
@@ -103,6 +103,8 @@
     return this->GetCanonicalizedValue() < that.GetCanonicalizedValue();
   }
 
+  bool InterferesWith(const InstructionOperand& that) const;
+
   void Print(const RegisterConfiguration* config) const;
   void Print() const;
 
@@ -155,7 +157,7 @@
     NONE,
     ANY,
     FIXED_REGISTER,
-    FIXED_DOUBLE_REGISTER,
+    FIXED_FP_REGISTER,
     MUST_HAVE_REGISTER,
     MUST_HAVE_SLOT,
     SAME_AS_FIRST_INPUT
@@ -192,7 +194,7 @@
 
   UnallocatedOperand(ExtendedPolicy policy, int index, int virtual_register)
       : UnallocatedOperand(virtual_register) {
-    DCHECK(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
+    DCHECK(policy == FIXED_REGISTER || policy == FIXED_FP_REGISTER);
     value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
     value_ |= ExtendedPolicyField::encode(policy);
     value_ |= LifetimeField::encode(USED_AT_END);
@@ -220,7 +222,7 @@
   bool HasFixedPolicy() const {
     return basic_policy() == FIXED_SLOT ||
            extended_policy() == FIXED_REGISTER ||
-           extended_policy() == FIXED_DOUBLE_REGISTER;
+           extended_policy() == FIXED_FP_REGISTER;
   }
   bool HasRegisterPolicy() const {
     return basic_policy() == EXTENDED_POLICY &&
@@ -239,9 +241,9 @@
     return basic_policy() == EXTENDED_POLICY &&
            extended_policy() == FIXED_REGISTER;
   }
-  bool HasFixedDoubleRegisterPolicy() const {
+  bool HasFixedFPRegisterPolicy() const {
     return basic_policy() == EXTENDED_POLICY &&
-           extended_policy() == FIXED_DOUBLE_REGISTER;
+           extended_policy() == FIXED_FP_REGISTER;
   }
   bool HasSecondaryStorage() const {
     return basic_policy() == EXTENDED_POLICY &&
@@ -272,9 +274,9 @@
                             FixedSlotIndexField::kShift);
   }
 
-  // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER.
+  // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_FP_REGISTER.
   int fixed_register_index() const {
-    DCHECK(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy());
+    DCHECK(HasFixedRegisterPolicy() || HasFixedFPRegisterPolicy());
     return FixedRegisterField::decode(value_);
   }
 
@@ -421,30 +423,32 @@
     return static_cast<int64_t>(value_) >> IndexField::kShift;
   }
 
+  int register_code() const {
+    DCHECK(IsRegister() || IsFPRegister());
+    return static_cast<int64_t>(value_) >> IndexField::kShift;
+  }
+
   Register GetRegister() const {
     DCHECK(IsRegister());
-    return Register::from_code(static_cast<int64_t>(value_) >>
-                               IndexField::kShift);
+    return Register::from_code(register_code());
   }
 
   FloatRegister GetFloatRegister() const {
     DCHECK(IsFloatRegister());
-    return FloatRegister::from_code(static_cast<int64_t>(value_) >>
-                                    IndexField::kShift);
+    return FloatRegister::from_code(register_code());
   }
 
   DoubleRegister GetDoubleRegister() const {
-    // TODO(bbudge) Tighten this test to IsDoubleRegister when all code
-    // generators are changed to use the correct Get*Register method.
+    // On platforms where FloatRegister, DoubleRegister, and Simd128Register
+    // are all the same type, it's convenient to treat everything as a
+    // DoubleRegister, so be lax about type checking here.
     DCHECK(IsFPRegister());
-    return DoubleRegister::from_code(static_cast<int64_t>(value_) >>
-                                     IndexField::kShift);
+    return DoubleRegister::from_code(register_code());
   }
 
   Simd128Register GetSimd128Register() const {
     DCHECK(IsSimd128Register());
-    return Simd128Register::from_code(static_cast<int64_t>(value_) >>
-                                      IndexField::kShift);
+    return Simd128Register::from_code(register_code());
   }
 
   LocationKind location_kind() const {
@@ -601,20 +605,25 @@
 
 uint64_t InstructionOperand::GetCanonicalizedValue() const {
   if (IsAllocated() || IsExplicit()) {
-    // TODO(dcarney): put machine type last and mask.
-    MachineRepresentation canonicalized_representation =
-        IsFloatingPoint(LocationOperand::cast(this)->representation())
-            ? MachineRepresentation::kFloat64
-            : MachineRepresentation::kNone;
+    MachineRepresentation rep = LocationOperand::cast(this)->representation();
+    MachineRepresentation canonical = MachineRepresentation::kNone;
+    if (IsFloatingPoint(rep)) {
+      if (kSimpleFPAliasing) {
+        // Archs with simple aliasing can treat all FP operands the same.
+        canonical = MachineRepresentation::kFloat64;
+      } else {
+        // We need to distinguish FP operands of different reps when FP
+        // aliasing is not simple (e.g. ARM).
+        canonical = rep;
+      }
+    }
     return InstructionOperand::KindField::update(
-        LocationOperand::RepresentationField::update(
-            this->value_, canonicalized_representation),
+        LocationOperand::RepresentationField::update(this->value_, canonical),
         LocationOperand::EXPLICIT);
   }
   return this->value_;
 }
 
-
 // Required for maps that don't care about machine type.
 struct CompareOperandModuloType {
   bool operator()(const InstructionOperand& a,
@@ -649,9 +658,9 @@
   }
   void SetPending() { destination_ = InstructionOperand(); }
 
-  // True if this move a move into the given destination operand.
-  bool Blocks(const InstructionOperand& operand) const {
-    return !IsEliminated() && source().EqualsCanonicalized(operand);
+  // True if this move is a move into the given destination operand.
+  bool Blocks(const InstructionOperand& destination) const {
+    return !IsEliminated() && source().InterferesWith(destination);
   }
 
   // A move is redundant if it's been eliminated or if its source and
@@ -1326,9 +1335,17 @@
     return GetRepresentation(virtual_register) ==
            MachineRepresentation::kTagged;
   }
-  bool IsFloat(int virtual_register) const {
+  bool IsFP(int virtual_register) const {
     return IsFloatingPoint(GetRepresentation(virtual_register));
   }
+  bool IsFloat(int virtual_register) const {
+    return GetRepresentation(virtual_register) ==
+           MachineRepresentation::kFloat32;
+  }
+  bool IsDouble(int virtual_register) const {
+    return GetRepresentation(virtual_register) ==
+           MachineRepresentation::kFloat64;
+  }
 
   Instruction* GetBlockStart(RpoNumber rpo) const;
 
diff --git a/src/compiler/int64-lowering.cc b/src/compiler/int64-lowering.cc
index 830a0de..68d3772 100644
--- a/src/compiler/int64-lowering.cc
+++ b/src/compiler/int64-lowering.cc
@@ -32,6 +32,8 @@
       signature_(signature),
       placeholder_(graph->NewNode(common->Parameter(-2, "placeholder"),
                                   graph->start())) {
+  DCHECK_NOT_NULL(graph);
+  DCHECK_NOT_NULL(graph->end());
   replacements_ = zone->NewArray<Replacement>(graph->NodeCount());
   memset(replacements_, 0, sizeof(Replacement) * graph->NodeCount());
 }
@@ -98,6 +100,27 @@
   return result;
 }
 
+void Int64Lowering::GetIndexNodes(Node* index, Node*& index_low,
+                                  Node*& index_high) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+  index_low = index;
+  index_high = graph()->NewNode(machine()->Int32Add(), index,
+                                graph()->NewNode(common()->Int32Constant(4)));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+  index_low = graph()->NewNode(machine()->Int32Add(), index,
+                               graph()->NewNode(common()->Int32Constant(4)));
+  index_high = index;
+#endif
+}
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+const int Int64Lowering::kLowerWordOffset = 0;
+const int Int64Lowering::kHigherWordOffset = 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+const int Int64Lowering::kLowerWordOffset = 4;
+const int Int64Lowering::kHigherWordOffset = 0;
+#endif
+
 void Int64Lowering::LowerNode(Node* node) {
   switch (node->opcode()) {
     case IrOpcode::kInt64Constant: {
@@ -115,10 +138,9 @@
       if (load_rep.representation() == MachineRepresentation::kWord64) {
         Node* base = node->InputAt(0);
         Node* index = node->InputAt(1);
-        Node* index_high =
-            graph()->NewNode(machine()->Int32Add(), index,
-                             graph()->NewNode(common()->Int32Constant(4)));
-
+        Node* index_low;
+        Node* index_high;
+        GetIndexNodes(index, index_low, index_high);
         const Operator* load_op = machine()->Load(MachineType::Int32());
         Node* high_node;
         if (node->InputCount() > 2) {
@@ -132,6 +154,7 @@
         } else {
           high_node = graph()->NewNode(load_op, base, index_high);
         }
+        node->ReplaceInput(1, index_low);
         NodeProperties::ChangeOp(node, load_op);
         ReplaceNode(node, node, high_node);
       } else {
@@ -150,10 +173,9 @@
 
         Node* base = node->InputAt(0);
         Node* index = node->InputAt(1);
-        Node* index_high =
-            graph()->NewNode(machine()->Int32Add(), index,
-                             graph()->NewNode(common()->Int32Constant(4)));
-
+        Node* index_low;
+        Node* index_high;
+        GetIndexNodes(index, index_low, index_high);
         Node* value = node->InputAt(2);
         DCHECK(HasReplacementLow(value));
         DCHECK(HasReplacementHigh(value));
@@ -175,6 +197,7 @@
                                        GetReplacementHigh(value));
         }
 
+        node->ReplaceInput(1, index_low);
         node->ReplaceInput(2, GetReplacementLow(value));
         NodeProperties::ChangeOp(node, store_op);
         ReplaceNode(node, node, high_node);
@@ -241,8 +264,10 @@
       if (descriptor->ReturnCount() == 1 &&
           descriptor->GetReturnType(0) == MachineType::Int64()) {
         // We access the additional return values through projections.
-        Node* low_node = graph()->NewNode(common()->Projection(0), node);
-        Node* high_node = graph()->NewNode(common()->Projection(1), node);
+        Node* low_node =
+            graph()->NewNode(common()->Projection(0), node, graph()->start());
+        Node* high_node =
+            graph()->NewNode(common()->Projection(1), node, graph()->start());
         ReplaceNode(node, low_node, high_node);
       }
       break;
@@ -281,8 +306,10 @@
 
       NodeProperties::ChangeOp(node, machine()->Int32PairAdd());
       // We access the additional return values through projections.
-      Node* low_node = graph()->NewNode(common()->Projection(0), node);
-      Node* high_node = graph()->NewNode(common()->Projection(1), node);
+      Node* low_node =
+          graph()->NewNode(common()->Projection(0), node, graph()->start());
+      Node* high_node =
+          graph()->NewNode(common()->Projection(1), node, graph()->start());
       ReplaceNode(node, low_node, high_node);
       break;
     }
@@ -299,8 +326,10 @@
 
       NodeProperties::ChangeOp(node, machine()->Int32PairSub());
       // We access the additional return values through projections.
-      Node* low_node = graph()->NewNode(common()->Projection(0), node);
-      Node* high_node = graph()->NewNode(common()->Projection(1), node);
+      Node* low_node =
+          graph()->NewNode(common()->Projection(0), node, graph()->start());
+      Node* high_node =
+          graph()->NewNode(common()->Projection(1), node, graph()->start());
       ReplaceNode(node, low_node, high_node);
       break;
     }
@@ -317,8 +346,10 @@
 
       NodeProperties::ChangeOp(node, machine()->Int32PairMul());
       // We access the additional return values through projections.
-      Node* low_node = graph()->NewNode(common()->Projection(0), node);
-      Node* high_node = graph()->NewNode(common()->Projection(1), node);
+      Node* low_node =
+          graph()->NewNode(common()->Projection(0), node, graph()->start());
+      Node* high_node =
+          graph()->NewNode(common()->Projection(1), node, graph()->start());
       ReplaceNode(node, low_node, high_node);
       break;
     }
@@ -367,8 +398,10 @@
 
       NodeProperties::ChangeOp(node, machine()->Word32PairShl());
       // We access the additional return values through projections.
-      Node* low_node = graph()->NewNode(common()->Projection(0), node);
-      Node* high_node = graph()->NewNode(common()->Projection(1), node);
+      Node* low_node =
+          graph()->NewNode(common()->Projection(0), node, graph()->start());
+      Node* high_node =
+          graph()->NewNode(common()->Projection(1), node, graph()->start());
       ReplaceNode(node, low_node, high_node);
       break;
     }
@@ -389,8 +422,10 @@
 
       NodeProperties::ChangeOp(node, machine()->Word32PairShr());
       // We access the additional return values through projections.
-      Node* low_node = graph()->NewNode(common()->Projection(0), node);
-      Node* high_node = graph()->NewNode(common()->Projection(1), node);
+      Node* low_node =
+          graph()->NewNode(common()->Projection(0), node, graph()->start());
+      Node* high_node =
+          graph()->NewNode(common()->Projection(1), node, graph()->start());
       ReplaceNode(node, low_node, high_node);
       break;
     }
@@ -411,8 +446,10 @@
 
       NodeProperties::ChangeOp(node, machine()->Word32PairSar());
       // We access the additional return values through projections.
-      Node* low_node = graph()->NewNode(common()->Projection(0), node);
-      Node* high_node = graph()->NewNode(common()->Projection(1), node);
+      Node* low_node =
+          graph()->NewNode(common()->Projection(0), node, graph()->start());
+      Node* high_node =
+          graph()->NewNode(common()->Projection(1), node, graph()->start());
       ReplaceNode(node, low_node, high_node);
       break;
     }
@@ -489,14 +526,16 @@
           machine()->Store(
               StoreRepresentation(MachineRepresentation::kWord32,
                                   WriteBarrierKind::kNoWriteBarrier)),
-          stack_slot, graph()->NewNode(common()->Int32Constant(4)),
+          stack_slot,
+          graph()->NewNode(common()->Int32Constant(kHigherWordOffset)),
           GetReplacementHigh(input), graph()->start(), graph()->start());
 
       Node* store_low_word = graph()->NewNode(
           machine()->Store(
               StoreRepresentation(MachineRepresentation::kWord32,
                                   WriteBarrierKind::kNoWriteBarrier)),
-          stack_slot, graph()->NewNode(common()->Int32Constant(0)),
+          stack_slot,
+          graph()->NewNode(common()->Int32Constant(kLowerWordOffset)),
           GetReplacementLow(input), store_high_word, graph()->start());
 
       Node* load =
@@ -522,15 +561,15 @@
           stack_slot, graph()->NewNode(common()->Int32Constant(0)), input,
           graph()->start(), graph()->start());
 
-      Node* high_node =
-          graph()->NewNode(machine()->Load(MachineType::Int32()), stack_slot,
-                           graph()->NewNode(common()->Int32Constant(4)), store,
-                           graph()->start());
+      Node* high_node = graph()->NewNode(
+          machine()->Load(MachineType::Int32()), stack_slot,
+          graph()->NewNode(common()->Int32Constant(kHigherWordOffset)), store,
+          graph()->start());
 
-      Node* low_node =
-          graph()->NewNode(machine()->Load(MachineType::Int32()), stack_slot,
-                           graph()->NewNode(common()->Int32Constant(0)), store,
-                           graph()->start());
+      Node* low_node = graph()->NewNode(
+          machine()->Load(MachineType::Int32()), stack_slot,
+          graph()->NewNode(common()->Int32Constant(kLowerWordOffset)), store,
+          graph()->start());
       ReplaceNode(node, low_node, high_node);
       break;
     }
diff --git a/src/compiler/int64-lowering.h b/src/compiler/int64-lowering.h
index 054c421..4ec4e82 100644
--- a/src/compiler/int64-lowering.h
+++ b/src/compiler/int64-lowering.h
@@ -26,6 +26,9 @@
   static int GetParameterCountAfterLowering(
       Signature<MachineRepresentation>* signature);
 
+  static const int kLowerWordOffset;
+  static const int kHigherWordOffset;
+
  private:
   enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
 
@@ -54,6 +57,7 @@
   bool HasReplacementHigh(Node* node);
   Node* GetReplacementHigh(Node* node);
   void PreparePhiReplacement(Node* phi);
+  void GetIndexNodes(Node* index, Node*& index_low, Node*& index_high);
 
   struct NodeState {
     Node* node;
diff --git a/src/compiler/js-builtin-reducer.cc b/src/compiler/js-builtin-reducer.cc
index 0d69a89..81d6392 100644
--- a/src/compiler/js-builtin-reducer.cc
+++ b/src/compiler/js-builtin-reducer.cc
@@ -91,16 +91,211 @@
       jsgraph_(jsgraph),
       type_cache_(TypeCache::Get()) {}
 
-// ECMA-262, section 15.8.2.11.
+// ES6 section 20.2.2.1 Math.abs ( x )
+Reduction JSBuiltinReducer::ReduceMathAbs(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.abs(a:plain-primitive) -> NumberAbs(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberAbs(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.6 Math.atan ( x )
+Reduction JSBuiltinReducer::ReduceMathAtan(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.atan(a:plain-primitive) -> NumberAtan(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberAtan(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.8 Math.atan2 ( y, x )
+Reduction JSBuiltinReducer::ReduceMathAtan2(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchTwo(Type::PlainPrimitive(), Type::PlainPrimitive())) {
+    // Math.atan2(a:plain-primitive,
+    //            b:plain-primitive) -> NumberAtan2(ToNumber(a),
+    //                                              ToNumber(b))
+    Node* left = ToNumber(r.left());
+    Node* right = ToNumber(r.right());
+    Node* value = graph()->NewNode(simplified()->NumberAtan2(), left, right);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.7 Math.atanh ( x )
+Reduction JSBuiltinReducer::ReduceMathAtanh(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.atanh(a:number) -> NumberAtanh(a)
+    Node* value = graph()->NewNode(simplified()->NumberAtanh(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.10 Math.ceil ( x )
+Reduction JSBuiltinReducer::ReduceMathCeil(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.ceil(a:plain-primitive) -> NumberCeil(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberCeil(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.11 Math.clz32 ( x )
+Reduction JSBuiltinReducer::ReduceMathClz32(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.clz32(a:plain-primitive) -> NumberClz32(ToUint32(a))
+    Node* input = ToUint32(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberClz32(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.12 Math.cos ( x )
+Reduction JSBuiltinReducer::ReduceMathCos(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.cos(a:plain-primitive) -> NumberCos(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberCos(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.14 Math.exp ( x )
+Reduction JSBuiltinReducer::ReduceMathExp(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.exp(a:plain-primitive) -> NumberExp(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberExp(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.15 Math.expm1 ( x )
+Reduction JSBuiltinReducer::ReduceMathExpm1(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.expm1(a:number) -> NumberExpm1(a)
+    Node* value = graph()->NewNode(simplified()->NumberExpm1(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.16 Math.floor ( x )
+Reduction JSBuiltinReducer::ReduceMathFloor(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.floor(a:plain-primitive) -> NumberFloor(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberFloor(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.17 Math.fround ( x )
+Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.fround(a:plain-primitive) -> NumberFround(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberFround(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.19 Math.imul ( x, y )
+Reduction JSBuiltinReducer::ReduceMathImul(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchTwo(Type::PlainPrimitive(), Type::PlainPrimitive())) {
+    // Math.imul(a:plain-primitive,
+    //           b:plain-primitive) -> NumberImul(ToUint32(a),
+    //                                            ToUint32(b))
+    Node* left = ToUint32(r.left());
+    Node* right = ToUint32(r.right());
+    Node* value = graph()->NewNode(simplified()->NumberImul(), left, right);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.20 Math.log ( x )
+Reduction JSBuiltinReducer::ReduceMathLog(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.log(a:plain-primitive) -> NumberLog(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberLog(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.21 Math.log1p ( x )
+Reduction JSBuiltinReducer::ReduceMathLog1p(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.log1p(a:plain-primitive) -> NumberLog1p(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberLog1p(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.22 Math.log10 ( x )
+Reduction JSBuiltinReducer::ReduceMathLog10(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.log10(a:number) -> NumberLog10(a)
+    Node* value = graph()->NewNode(simplified()->NumberLog10(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.23 Math.log2 ( x )
+Reduction JSBuiltinReducer::ReduceMathLog2(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.log2(a:number) -> NumberLog2(a)
+    Node* value = graph()->NewNode(simplified()->NumberLog2(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.24 Math.max ( value1, value2, ...values )
 Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
   JSCallReduction r(node);
   if (r.InputsMatchZero()) {
     // Math.max() -> -Infinity
     return Replace(jsgraph()->Constant(-V8_INFINITY));
   }
-  if (r.InputsMatchOne(Type::Number())) {
-    // Math.max(a:number) -> a
-    return Replace(r.left());
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.max(a:plain-primitive) -> ToNumber(a)
+    Node* value = ToNumber(r.GetJSCallInput(0));
+    return Replace(value);
   }
   if (r.InputsMatchAll(Type::Integral32())) {
     // Math.max(a:int32, b:int32, ...)
@@ -117,67 +312,28 @@
   return NoChange();
 }
 
-// ES6 section 20.2.2.19 Math.imul ( x, y )
-Reduction JSBuiltinReducer::ReduceMathImul(Node* node) {
+// ES6 section 20.2.2.25 Math.min ( value1, value2, ...values )
+Reduction JSBuiltinReducer::ReduceMathMin(Node* node) {
   JSCallReduction r(node);
-  if (r.InputsMatchTwo(Type::Number(), Type::Number())) {
-    // Math.imul(a:number, b:number) -> NumberImul(NumberToUint32(a),
-    //                                             NumberToUint32(b))
-    Node* a = graph()->NewNode(simplified()->NumberToUint32(), r.left());
-    Node* b = graph()->NewNode(simplified()->NumberToUint32(), r.right());
-    Node* value = graph()->NewNode(simplified()->NumberImul(), a, b);
+  if (r.InputsMatchZero()) {
+    // Math.min() -> Infinity
+    return Replace(jsgraph()->Constant(V8_INFINITY));
+  }
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.min(a:plain-primitive) -> ToNumber(a)
+    Node* value = ToNumber(r.GetJSCallInput(0));
     return Replace(value);
   }
-  return NoChange();
-}
-
-// ES6 section 20.2.2.10 Math.ceil ( x )
-Reduction JSBuiltinReducer::ReduceMathCeil(Node* node) {
-  JSCallReduction r(node);
-  if (r.InputsMatchOne(Type::Number())) {
-    // Math.ceil(a:number) -> NumberCeil(a)
-    Node* value = graph()->NewNode(simplified()->NumberCeil(), r.left());
-    return Replace(value);
-  }
-  return NoChange();
-}
-
-// ES6 section 20.2.2.11 Math.clz32 ( x )
-Reduction JSBuiltinReducer::ReduceMathClz32(Node* node) {
-  JSCallReduction r(node);
-  if (r.InputsMatchOne(Type::Unsigned32())) {
-    // Math.clz32(a:unsigned32) -> NumberClz32(a)
-    Node* value = graph()->NewNode(simplified()->NumberClz32(), r.left());
-    return Replace(value);
-  }
-  if (r.InputsMatchOne(Type::Number())) {
-    // Math.clz32(a:number) -> NumberClz32(NumberToUint32(a))
-    Node* value = graph()->NewNode(
-        simplified()->NumberClz32(),
-        graph()->NewNode(simplified()->NumberToUint32(), r.left()));
-    return Replace(value);
-  }
-  return NoChange();
-}
-
-// ES6 draft 08-24-14, section 20.2.2.16.
-Reduction JSBuiltinReducer::ReduceMathFloor(Node* node) {
-  JSCallReduction r(node);
-  if (r.InputsMatchOne(Type::Number())) {
-    // Math.floor(a:number) -> NumberFloor(a)
-    Node* value = graph()->NewNode(simplified()->NumberFloor(), r.left());
-    return Replace(value);
-  }
-  return NoChange();
-}
-
-// ES6 draft 08-24-14, section 20.2.2.17.
-Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
-  JSCallReduction r(node);
-  if (r.InputsMatchOne(Type::NumberOrUndefined())) {
-    // Math.fround(a:number) -> TruncateFloat64ToFloat32(a)
-    Node* value =
-        graph()->NewNode(machine()->TruncateFloat64ToFloat32(), r.left());
+  if (r.InputsMatchAll(Type::Integral32())) {
+    // Math.min(a:int32, b:int32, ...)
+    Node* value = r.GetJSCallInput(0);
+    for (int i = 1; i < r.GetJSCallArity(); i++) {
+      Node* const input = r.GetJSCallInput(i);
+      value = graph()->NewNode(
+          common()->Select(MachineRepresentation::kNone),
+          graph()->NewNode(simplified()->NumberLessThan(), input, value), input,
+          value);
+    }
     return Replace(value);
   }
   return NoChange();
@@ -186,9 +342,33 @@
 // ES6 section 20.2.2.28 Math.round ( x )
 Reduction JSBuiltinReducer::ReduceMathRound(Node* node) {
   JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.round(a:plain-primitive) -> NumberRound(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberRound(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.9 Math.cbrt ( x )
+Reduction JSBuiltinReducer::ReduceMathCbrt(Node* node) {
+  JSCallReduction r(node);
   if (r.InputsMatchOne(Type::Number())) {
-    // Math.round(a:number) -> NumberRound(a)
-    Node* value = graph()->NewNode(simplified()->NumberRound(), r.left());
+    // Math.cbrt(a:number) -> NumberCbrt(a)
+    Node* value = graph()->NewNode(simplified()->NumberCbrt(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.30 Math.sin ( x )
+Reduction JSBuiltinReducer::ReduceMathSin(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.sin(a:plain-primitive) -> NumberSin(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberSin(), input);
     return Replace(value);
   }
   return NoChange();
@@ -197,9 +377,22 @@
 // ES6 section 20.2.2.32 Math.sqrt ( x )
 Reduction JSBuiltinReducer::ReduceMathSqrt(Node* node) {
   JSCallReduction r(node);
-  if (r.InputsMatchOne(Type::Number())) {
-    // Math.sqrt(a:number) -> Float64Sqrt(a)
-    Node* value = graph()->NewNode(machine()->Float64Sqrt(), r.left());
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.sqrt(a:plain-primitive) -> NumberSqrt(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberSqrt(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.33 Math.tan ( x )
+Reduction JSBuiltinReducer::ReduceMathTan(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.tan(a:plain-primitive) -> NumberTan(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberTan(), input);
     return Replace(value);
   }
   return NoChange();
@@ -208,9 +401,22 @@
 // ES6 section 20.2.2.35 Math.trunc ( x )
 Reduction JSBuiltinReducer::ReduceMathTrunc(Node* node) {
   JSCallReduction r(node);
-  if (r.InputsMatchOne(Type::Number())) {
-    // Math.trunc(a:number) -> NumberTrunc(a)
-    Node* value = graph()->NewNode(simplified()->NumberTrunc(), r.left());
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.trunc(a:plain-primitive) -> NumberTrunc(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberTrunc(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
+Reduction JSBuiltinReducer::ReduceStringFromCharCode(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // String.fromCharCode(a:plain-primitive) -> StringFromCharCode(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->StringFromCharCode(), input);
     return Replace(value);
   }
   return NoChange();
@@ -223,11 +429,17 @@
   // Dispatch according to the BuiltinFunctionId if present.
   if (!r.HasBuiltinFunctionId()) return NoChange();
   switch (r.GetBuiltinFunctionId()) {
-    case kMathMax:
-      reduction = ReduceMathMax(node);
+    case kMathAbs:
+      reduction = ReduceMathAbs(node);
       break;
-    case kMathImul:
-      reduction = ReduceMathImul(node);
+    case kMathAtan:
+      reduction = ReduceMathAtan(node);
+      break;
+    case kMathAtan2:
+      reduction = ReduceMathAtan2(node);
+      break;
+    case kMathAtanh:
+      reduction = ReduceMathAtanh(node);
       break;
     case kMathClz32:
       reduction = ReduceMathClz32(node);
@@ -235,21 +447,63 @@
     case kMathCeil:
       reduction = ReduceMathCeil(node);
       break;
+    case kMathCos:
+      reduction = ReduceMathCos(node);
+      break;
+    case kMathExp:
+      reduction = ReduceMathExp(node);
+      break;
+    case kMathExpm1:
+      reduction = ReduceMathExpm1(node);
+      break;
     case kMathFloor:
       reduction = ReduceMathFloor(node);
       break;
     case kMathFround:
       reduction = ReduceMathFround(node);
       break;
+    case kMathImul:
+      reduction = ReduceMathImul(node);
+      break;
+    case kMathLog:
+      reduction = ReduceMathLog(node);
+      break;
+    case kMathLog1p:
+      reduction = ReduceMathLog1p(node);
+      break;
+    case kMathLog10:
+      reduction = ReduceMathLog10(node);
+      break;
+    case kMathLog2:
+      reduction = ReduceMathLog2(node);
+      break;
+    case kMathMax:
+      reduction = ReduceMathMax(node);
+      break;
+    case kMathMin:
+      reduction = ReduceMathMin(node);
+      break;
+    case kMathCbrt:
+      reduction = ReduceMathCbrt(node);
+      break;
     case kMathRound:
       reduction = ReduceMathRound(node);
       break;
+    case kMathSin:
+      reduction = ReduceMathSin(node);
+      break;
     case kMathSqrt:
       reduction = ReduceMathSqrt(node);
       break;
+    case kMathTan:
+      reduction = ReduceMathTan(node);
+      break;
     case kMathTrunc:
       reduction = ReduceMathTrunc(node);
       break;
+    case kStringFromCharCode:
+      reduction = ReduceStringFromCharCode(node);
+      break;
     default:
       break;
   }
@@ -261,6 +515,18 @@
   return reduction;
 }
 
+Node* JSBuiltinReducer::ToNumber(Node* input) {
+  Type* input_type = NodeProperties::GetType(input);
+  if (input_type->Is(Type::Number())) return input;
+  return graph()->NewNode(simplified()->PlainPrimitiveToNumber(), input);
+}
+
+Node* JSBuiltinReducer::ToUint32(Node* input) {
+  input = ToNumber(input);
+  Type* input_type = NodeProperties::GetType(input);
+  if (input_type->Is(Type::Unsigned32())) return input;
+  return graph()->NewNode(simplified()->NumberToUint32(), input);
+}
 
 Graph* JSBuiltinReducer::graph() const { return jsgraph()->graph(); }
 
@@ -273,11 +539,6 @@
 }
 
 
-MachineOperatorBuilder* JSBuiltinReducer::machine() const {
-  return jsgraph()->machine();
-}
-
-
 SimplifiedOperatorBuilder* JSBuiltinReducer::simplified() const {
   return jsgraph()->simplified();
 }
diff --git a/src/compiler/js-builtin-reducer.h b/src/compiler/js-builtin-reducer.h
index dfeb409..c915792 100644
--- a/src/compiler/js-builtin-reducer.h
+++ b/src/compiler/js-builtin-reducer.h
@@ -18,7 +18,6 @@
 // Forward declarations.
 class CommonOperatorBuilder;
 class JSGraph;
-class MachineOperatorBuilder;
 class SimplifiedOperatorBuilder;
 
 
@@ -30,22 +29,39 @@
   Reduction Reduce(Node* node) final;
 
  private:
-  Reduction ReduceFunctionCall(Node* node);
-  Reduction ReduceMathMax(Node* node);
-  Reduction ReduceMathImul(Node* node);
+  Reduction ReduceMathAbs(Node* node);
+  Reduction ReduceMathAtan(Node* node);
+  Reduction ReduceMathAtan2(Node* node);
+  Reduction ReduceMathAtanh(Node* node);
   Reduction ReduceMathCeil(Node* node);
   Reduction ReduceMathClz32(Node* node);
+  Reduction ReduceMathCos(Node* node);
+  Reduction ReduceMathExp(Node* node);
   Reduction ReduceMathFloor(Node* node);
   Reduction ReduceMathFround(Node* node);
+  Reduction ReduceMathImul(Node* node);
+  Reduction ReduceMathLog(Node* node);
+  Reduction ReduceMathLog1p(Node* node);
+  Reduction ReduceMathLog10(Node* node);
+  Reduction ReduceMathLog2(Node* node);
+  Reduction ReduceMathMax(Node* node);
+  Reduction ReduceMathMin(Node* node);
+  Reduction ReduceMathCbrt(Node* node);
+  Reduction ReduceMathExpm1(Node* node);
   Reduction ReduceMathRound(Node* node);
+  Reduction ReduceMathSin(Node* node);
   Reduction ReduceMathSqrt(Node* node);
+  Reduction ReduceMathTan(Node* node);
   Reduction ReduceMathTrunc(Node* node);
+  Reduction ReduceStringFromCharCode(Node* node);
+
+  Node* ToNumber(Node* value);
+  Node* ToUint32(Node* value);
 
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
   Isolate* isolate() const;
   CommonOperatorBuilder* common() const;
-  MachineOperatorBuilder* machine() const;
   SimplifiedOperatorBuilder* simplified() const;
 
   JSGraph* const jsgraph_;
diff --git a/src/compiler/js-call-reducer.cc b/src/compiler/js-call-reducer.cc
index b3561e9..f4b0d7b 100644
--- a/src/compiler/js-call-reducer.cc
+++ b/src/compiler/js-call-reducer.cc
@@ -71,7 +71,6 @@
   size_t const arity = p.arity() - 2;
   NodeProperties::ReplaceValueInput(node, target, 0);
   NodeProperties::ReplaceValueInput(node, target, 1);
-  NodeProperties::RemoveFrameStateInput(node, 1);
   // TODO(bmeurer): We might need to propagate the tail call mode to
   // the JSCreateArray operator, because an Array call in tail call
   // position must always properly consume the parent stack frame.
@@ -89,7 +88,6 @@
   DCHECK_LE(2u, p.arity());
   Node* value = (p.arity() == 2) ? jsgraph()->ZeroConstant()
                                  : NodeProperties::GetValueInput(node, 2);
-  NodeProperties::RemoveFrameStateInput(node, 1);
   NodeProperties::ReplaceValueInputs(node, value);
   NodeProperties::ChangeOp(node, javascript()->ToNumber());
   return Changed(node);
@@ -220,9 +218,9 @@
   CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
   Node* target = NodeProperties::GetValueInput(node, 0);
   Node* context = NodeProperties::GetContextInput(node);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
   Node* control = NodeProperties::GetControlInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
+  Node* frame_state = NodeProperties::FindFrameStateBefore(node);
 
   // Try to specialize JSCallFunction {node}s with constant {target}s.
   HeapObjectMatcher m(target);
@@ -233,7 +231,6 @@
 
       // Raise a TypeError if the {target} is a "classConstructor".
       if (IsClassConstructor(shared->kind())) {
-        NodeProperties::RemoveFrameStateInput(node, 0);
         NodeProperties::ReplaceValueInputs(node, target);
         NodeProperties::ChangeOp(
             node, javascript()->CallRuntime(
@@ -272,7 +269,7 @@
                                          isolate());
       CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
       ConvertReceiverMode const convert_mode =
-          (bound_this->IsNull() || bound_this->IsUndefined())
+          (bound_this->IsNull(isolate()) || bound_this->IsUndefined(isolate()))
               ? ConvertReceiverMode::kNullOrUndefined
               : ConvertReceiverMode::kNotNullOrUndefined;
       size_t arity = p.arity();
@@ -326,10 +323,11 @@
     }
 
     // Check that the {target} is still the {array_function}.
-    Node* check = graph()->NewNode(javascript()->StrictEqual(), target,
-                                   array_function, context);
-    control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
-                               effect, control);
+    Node* check = graph()->NewNode(
+        javascript()->StrictEqual(CompareOperationHints::Any()), target,
+        array_function, context);
+    control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                        frame_state, effect, control);
 
     // Turn the {node} into a {JSCreateArray} call.
     NodeProperties::ReplaceValueInput(node, array_function, 0);
@@ -343,13 +341,15 @@
           jsgraph()->Constant(handle(cell->value(), isolate()));
 
       // Check that the {target} is still the {target_function}.
-      Node* check = graph()->NewNode(javascript()->StrictEqual(), target,
-                                     target_function, context);
-      control = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                 frame_state, effect, control);
+      Node* check = graph()->NewNode(
+          javascript()->StrictEqual(CompareOperationHints::Any()), target,
+          target_function, context);
+      control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                          frame_state, effect, control);
 
       // Specialize the JSCallFunction node to the {target_function}.
       NodeProperties::ReplaceValueInput(node, target_function, 0);
+      NodeProperties::ReplaceEffectInput(node, effect);
       NodeProperties::ReplaceControlInput(node, control);
 
       // Try to further reduce the JSCallFunction {node}.
@@ -369,9 +369,9 @@
   Node* target = NodeProperties::GetValueInput(node, 0);
   Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
   Node* context = NodeProperties::GetContextInput(node);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
+  Node* frame_state = NodeProperties::FindFrameStateBefore(node);
 
   // Try to specialize JSCallConstruct {node}s with constant {target}s.
   HeapObjectMatcher m(target);
@@ -381,11 +381,6 @@
 
       // Raise a TypeError if the {target} is not a constructor.
       if (!function->IsConstructor()) {
-        // Drop the lazy bailout location and use the eager bailout point for
-        // the runtime function (actually as lazy bailout point). It doesn't
-        // really matter which bailout location we use since we never really
-        // go back after throwing the exception.
-        NodeProperties::RemoveFrameStateInput(node, 0);
         NodeProperties::ReplaceValueInputs(node, target);
         NodeProperties::ChangeOp(
             node, javascript()->CallRuntime(Runtime::kThrowCalledNonCallable));
@@ -405,7 +400,6 @@
         }
 
         // Turn the {node} into a {JSCreateArray} call.
-        NodeProperties::RemoveFrameStateInput(node, 1);
         for (int i = arity; i > 0; --i) {
           NodeProperties::ReplaceValueInput(
               node, NodeProperties::GetValueInput(node, i), i + 1);
@@ -451,15 +445,15 @@
     }
 
     // Check that the {target} is still the {array_function}.
-    Node* check = graph()->NewNode(javascript()->StrictEqual(), target,
-                                   array_function, context);
-    control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
-                               effect, control);
+    Node* check = graph()->NewNode(
+        javascript()->StrictEqual(CompareOperationHints::Any()), target,
+        array_function, context);
+    control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                        frame_state, effect, control);
 
     // Turn the {node} into a {JSCreateArray} call.
     NodeProperties::ReplaceEffectInput(node, effect);
     NodeProperties::ReplaceControlInput(node, control);
-    NodeProperties::RemoveFrameStateInput(node, 1);
     for (int i = arity; i > 0; --i) {
       NodeProperties::ReplaceValueInput(
           node, NodeProperties::GetValueInput(node, i), i + 1);
@@ -474,10 +468,11 @@
           jsgraph()->Constant(handle(cell->value(), isolate()));
 
       // Check that the {target} is still the {target_function}.
-      Node* check = graph()->NewNode(javascript()->StrictEqual(), target,
-                                     target_function, context);
-      control = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                 frame_state, effect, control);
+      Node* check = graph()->NewNode(
+          javascript()->StrictEqual(CompareOperationHints::Any()), target,
+          target_function, context);
+      control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                          frame_state, effect, control);
 
       // Specialize the JSCallConstruct node to the {target_function}.
       NodeProperties::ReplaceValueInput(node, target_function, 0);
diff --git a/src/compiler/js-context-specialization.cc b/src/compiler/js-context-specialization.cc
index 4d9d1d9..e02fc49 100644
--- a/src/compiler/js-context-specialization.cc
+++ b/src/compiler/js-context-specialization.cc
@@ -70,7 +70,7 @@
   // before the function to which it belongs has initialized the slot.
   // We must be conservative and check if the value in the slot is currently the
   // hole or undefined. If it is neither of these, then it must be initialized.
-  if (value->IsUndefined() || value->IsTheHole()) {
+  if (value->IsUndefined(isolate()) || value->IsTheHole(isolate())) {
     return NoChange();
   }
 
diff --git a/src/compiler/js-create-lowering.cc b/src/compiler/js-create-lowering.cc
index 16e1666..0f829d4 100644
--- a/src/compiler/js-create-lowering.cc
+++ b/src/compiler/js-create-lowering.cc
@@ -37,7 +37,8 @@
 
   // Primitive allocation of static size.
   void Allocate(int size, PretenureFlag pretenure = NOT_TENURED) {
-    effect_ = graph()->NewNode(common()->BeginRegion(), effect_);
+    effect_ = graph()->NewNode(
+        common()->BeginRegion(RegionObservability::kNotObservable), effect_);
     allocation_ =
         graph()->NewNode(simplified()->Allocate(pretenure),
                          jsgraph()->Constant(size), effect_, control_);
@@ -311,11 +312,10 @@
         Operator::Properties properties = node->op()->properties();
         CallDescriptor* desc = Linkage::GetStubCallDescriptor(
             isolate(), graph()->zone(), callable.descriptor(), 0,
-            CallDescriptor::kNoFlags, properties);
+            CallDescriptor::kNeedsFrameState, properties);
         const Operator* new_op = common()->Call(desc);
         Node* stub_code = jsgraph()->HeapConstant(callable.code());
         node->InsertInput(graph()->zone(), 0, stub_code);
-        node->RemoveInput(3);  // Remove the frame state.
         NodeProperties::ChangeOp(node, new_op);
         return Changed(node);
       }
@@ -324,11 +324,10 @@
         Operator::Properties properties = node->op()->properties();
         CallDescriptor* desc = Linkage::GetStubCallDescriptor(
             isolate(), graph()->zone(), callable.descriptor(), 0,
-            CallDescriptor::kNoFlags, properties);
+            CallDescriptor::kNeedsFrameState, properties);
         const Operator* new_op = common()->Call(desc);
         Node* stub_code = jsgraph()->HeapConstant(callable.code());
         node->InsertInput(graph()->zone(), 0, stub_code);
-        node->RemoveInput(3);  // Remove the frame state.
         NodeProperties::ChangeOp(node, new_op);
         return Changed(node);
       }
@@ -551,44 +550,40 @@
   CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
   Handle<SharedFunctionInfo> shared = p.shared_info();
 
-  // Use inline allocation for functions that don't need literals cloning.
-  if (shared->num_literals() == 0) {
-    Node* effect = NodeProperties::GetEffectInput(node);
-    Node* control = NodeProperties::GetControlInput(node);
-    Node* context = NodeProperties::GetContextInput(node);
-    Node* native_context = effect = graph()->NewNode(
-        javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-        context, context, effect);
-    int function_map_index =
-        Context::FunctionMapIndex(shared->language_mode(), shared->kind());
-    Node* function_map = effect =
-        graph()->NewNode(javascript()->LoadContext(0, function_map_index, true),
-                         native_context, native_context, effect);
-    // Note that it is only safe to embed the raw entry point of the compile
-    // lazy stub into the code, because that stub is immortal and immovable.
-    Node* compile_entry = jsgraph()->IntPtrConstant(reinterpret_cast<intptr_t>(
-        jsgraph()->isolate()->builtins()->CompileLazy()->entry()));
-    Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
-    Node* the_hole = jsgraph()->TheHoleConstant();
-    Node* undefined = jsgraph()->UndefinedConstant();
-    AllocationBuilder a(jsgraph(), effect, control);
-    STATIC_ASSERT(JSFunction::kSize == 9 * kPointerSize);
-    a.Allocate(JSFunction::kSize, p.pretenure());
-    a.Store(AccessBuilder::ForMap(), function_map);
-    a.Store(AccessBuilder::ForJSObjectProperties(), empty_fixed_array);
-    a.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
-    a.Store(AccessBuilder::ForJSFunctionLiterals(), empty_fixed_array);
-    a.Store(AccessBuilder::ForJSFunctionPrototypeOrInitialMap(), the_hole);
-    a.Store(AccessBuilder::ForJSFunctionSharedFunctionInfo(), shared);
-    a.Store(AccessBuilder::ForJSFunctionContext(), context);
-    a.Store(AccessBuilder::ForJSFunctionCodeEntry(), compile_entry);
-    a.Store(AccessBuilder::ForJSFunctionNextFunctionLink(), undefined);
-    RelaxControls(node);
-    a.FinishAndChange(node);
-    return Changed(node);
-  }
-
-  return NoChange();
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* native_context = effect = graph()->NewNode(
+      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+      context, context, effect);
+  int function_map_index =
+      Context::FunctionMapIndex(shared->language_mode(), shared->kind());
+  Node* function_map = effect =
+      graph()->NewNode(javascript()->LoadContext(0, function_map_index, true),
+                       native_context, native_context, effect);
+  // Note that it is only safe to embed the raw entry point of the compile
+  // lazy stub into the code, because that stub is immortal and immovable.
+  Node* compile_entry = jsgraph()->IntPtrConstant(reinterpret_cast<intptr_t>(
+      jsgraph()->isolate()->builtins()->CompileLazy()->entry()));
+  Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
+  Node* empty_literals_array = jsgraph()->EmptyLiteralsArrayConstant();
+  Node* the_hole = jsgraph()->TheHoleConstant();
+  Node* undefined = jsgraph()->UndefinedConstant();
+  AllocationBuilder a(jsgraph(), effect, control);
+  STATIC_ASSERT(JSFunction::kSize == 9 * kPointerSize);
+  a.Allocate(JSFunction::kSize, p.pretenure());
+  a.Store(AccessBuilder::ForMap(), function_map);
+  a.Store(AccessBuilder::ForJSObjectProperties(), empty_fixed_array);
+  a.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
+  a.Store(AccessBuilder::ForJSFunctionLiterals(), empty_literals_array);
+  a.Store(AccessBuilder::ForJSFunctionPrototypeOrInitialMap(), the_hole);
+  a.Store(AccessBuilder::ForJSFunctionSharedFunctionInfo(), shared);
+  a.Store(AccessBuilder::ForJSFunctionContext(), context);
+  a.Store(AccessBuilder::ForJSFunctionCodeEntry(), compile_entry);
+  a.Store(AccessBuilder::ForJSFunctionNextFunctionLink(), undefined);
+  RelaxControls(node);
+  a.FinishAndChange(node);
+  return Changed(node);
 }
 
 Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
@@ -957,7 +952,8 @@
         site_context->ExitScope(current_site, boilerplate_object);
       } else if (property_details.representation().IsDouble()) {
         // Allocate a mutable HeapNumber box and store the value into it.
-        effect = graph()->NewNode(common()->BeginRegion(), effect);
+        effect = graph()->NewNode(
+            common()->BeginRegion(RegionObservability::kNotObservable), effect);
         value = effect = graph()->NewNode(
             simplified()->Allocate(NOT_TENURED),
             jsgraph()->Constant(HeapNumber::kSize), effect, control);
@@ -974,7 +970,7 @@
             graph()->NewNode(common()->FinishRegion(), value, effect);
       } else if (property_details.representation().IsSmi()) {
         // Ensure that value is stored as smi.
-        value = boilerplate_value->IsUninitialized()
+        value = boilerplate_value->IsUninitialized(isolate())
                     ? jsgraph()->ZeroConstant()
                     : jsgraph()->Constant(boilerplate_value);
       } else {
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
index 105298e..47a82d2 100644
--- a/src/compiler/js-generic-lowering.cc
+++ b/src/compiler/js-generic-lowering.cc
@@ -156,17 +156,15 @@
   Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   const PropertyAccess& p = PropertyAccessOf(node->op());
-  Callable callable =
-      CodeFactory::KeyedLoadICInOptimizedCode(isolate(), UNINITIALIZED);
+  Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
   // Load the type feedback vector from the closure.
-  Node* shared_info = effect = graph()->NewNode(
+  Node* literals = effect = graph()->NewNode(
       machine()->Load(MachineType::AnyTagged()), closure,
-      jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
-                                kHeapObjectTag),
+      jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
       effect, control);
   Node* vector = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), shared_info,
-      jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+      machine()->Load(MachineType::AnyTagged()), literals,
+      jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
                                 kHeapObjectTag),
       effect, control);
   node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
@@ -182,17 +180,15 @@
   Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   NamedAccess const& p = NamedAccessOf(node->op());
-  Callable callable = CodeFactory::LoadICInOptimizedCode(
-      isolate(), NOT_INSIDE_TYPEOF, UNINITIALIZED);
+  Callable callable = CodeFactory::LoadICInOptimizedCode(isolate());
   // Load the type feedback vector from the closure.
-  Node* shared_info = effect = graph()->NewNode(
+  Node* literals = effect = graph()->NewNode(
       machine()->Load(MachineType::AnyTagged()), closure,
-      jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
-                                kHeapObjectTag),
+      jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
       effect, control);
   Node* vector = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), shared_info,
-      jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+      machine()->Load(MachineType::AnyTagged()), literals,
+      jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
                                 kHeapObjectTag),
       effect, control);
   node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
@@ -205,39 +201,25 @@
 
 void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
   Node* closure = NodeProperties::GetValueInput(node, 0);
-  Node* context = NodeProperties::GetContextInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   const LoadGlobalParameters& p = LoadGlobalParametersOf(node->op());
-  Callable callable = CodeFactory::LoadICInOptimizedCode(
-      isolate(), p.typeof_mode(), UNINITIALIZED);
+  Callable callable =
+      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), p.typeof_mode());
   // Load the type feedback vector from the closure.
-  Node* shared_info = effect = graph()->NewNode(
+  Node* literals = effect = graph()->NewNode(
       machine()->Load(MachineType::AnyTagged()), closure,
-      jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
-                                kHeapObjectTag),
+      jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
       effect, control);
   Node* vector = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), shared_info,
-      jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+      machine()->Load(MachineType::AnyTagged()), literals,
+      jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
                                 kHeapObjectTag),
       effect, control);
-  // Load global object from the context.
-  Node* native_context = effect =
-      graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
-                       jsgraph()->IntPtrConstant(
-                           Context::SlotOffset(Context::NATIVE_CONTEXT_INDEX)),
-                       effect, control);
-  Node* global = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), native_context,
-      jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
-      effect, control);
-  node->InsertInput(zone(), 0, global);
-  node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
-  node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
-  node->ReplaceInput(3, vector);
-  node->ReplaceInput(6, effect);
+  node->InsertInput(zone(), 0, jsgraph()->SmiConstant(p.feedback().index()));
+  node->ReplaceInput(1, vector);
+  node->ReplaceInput(4, effect);
   ReplaceWithStubCall(node, callable, flags);
 }
 
@@ -249,17 +231,16 @@
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   PropertyAccess const& p = PropertyAccessOf(node->op());
   LanguageMode language_mode = p.language_mode();
-  Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
-      isolate(), language_mode, UNINITIALIZED);
+  Callable callable =
+      CodeFactory::KeyedStoreICInOptimizedCode(isolate(), language_mode);
   // Load the type feedback vector from the closure.
-  Node* shared_info = effect = graph()->NewNode(
+  Node* literals = effect = graph()->NewNode(
       machine()->Load(MachineType::AnyTagged()), closure,
-      jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
-                                kHeapObjectTag),
+      jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
       effect, control);
   Node* vector = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), shared_info,
-      jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+      machine()->Load(MachineType::AnyTagged()), literals,
+      jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
                                 kHeapObjectTag),
       effect, control);
   node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
@@ -275,17 +256,16 @@
   Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   NamedAccess const& p = NamedAccessOf(node->op());
-  Callable callable = CodeFactory::StoreICInOptimizedCode(
-      isolate(), p.language_mode(), UNINITIALIZED);
+  Callable callable =
+      CodeFactory::StoreICInOptimizedCode(isolate(), p.language_mode());
   // Load the type feedback vector from the closure.
-  Node* shared_info = effect = graph()->NewNode(
+  Node* literals = effect = graph()->NewNode(
       machine()->Load(MachineType::AnyTagged()), closure,
-      jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
-                                kHeapObjectTag),
+      jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
       effect, control);
   Node* vector = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), shared_info,
-      jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+      machine()->Load(MachineType::AnyTagged()), literals,
+      jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
                                 kHeapObjectTag),
       effect, control);
   node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
@@ -303,17 +283,16 @@
   Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
-  Callable callable = CodeFactory::StoreICInOptimizedCode(
-      isolate(), p.language_mode(), UNINITIALIZED);
+  Callable callable =
+      CodeFactory::StoreICInOptimizedCode(isolate(), p.language_mode());
   // Load the type feedback vector from the closure.
-  Node* shared_info = effect = graph()->NewNode(
+  Node* literals = effect = graph()->NewNode(
       machine()->Load(MachineType::AnyTagged()), closure,
-      jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
-                                kHeapObjectTag),
+      jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
       effect, control);
   Node* vector = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), shared_info,
-      jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+      machine()->Load(MachineType::AnyTagged()), literals,
+      jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
                                 kHeapObjectTag),
       effect, control);
   // Load global object from the context.
@@ -441,7 +420,8 @@
           CallDescriptor::kNeedsFrameState);
       node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
       node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
-      node->InsertInput(graph()->zone(), 3, jsgraph()->UndefinedConstant());
+      node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(0));
+      node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
       NodeProperties::ChangeOp(node, common()->Call(desc));
     } else if (arity == 1) {
       // TODO(bmeurer): Optimize for the 0 length non-holey case?
@@ -456,8 +436,7 @@
       node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
       NodeProperties::ChangeOp(node, common()->Call(desc));
     } else {
-      ArrayNArgumentsConstructorStub stub(isolate(), elements_kind,
-                                          override_mode);
+      ArrayNArgumentsConstructorStub stub(isolate());
       CallDescriptor* desc = Linkage::GetStubCallDescriptor(
           isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
           arity + 1, CallDescriptor::kNeedsFrameState);
@@ -485,9 +464,8 @@
   Handle<SharedFunctionInfo> const shared_info = p.shared_info();
   node->InsertInput(zone(), 0, jsgraph()->HeapConstant(shared_info));
 
-  // Use the FastNewClosureStub that allocates in new space only for nested
-  // functions that don't need literals cloning.
-  if (p.pretenure() == NOT_TENURED && shared_info->num_literals() == 0) {
+  // Use the FastNewClosureStub only for functions allocated in new space.
+  if (p.pretenure() == NOT_TENURED) {
     Callable callable = CodeFactory::FastNewClosure(
         isolate(), shared_info->language_mode(), shared_info->kind());
     ReplaceWithStubCall(node, callable, flags);
@@ -679,6 +657,17 @@
   NodeProperties::ChangeOp(node, machine()->Store(representation));
 }
 
+void JSGenericLowering::LowerJSGeneratorStore(Node* node) {
+  UNREACHABLE();  // Eliminated in typed lowering.
+}
+
+void JSGenericLowering::LowerJSGeneratorRestoreContinuation(Node* node) {
+  UNREACHABLE();  // Eliminated in typed lowering.
+}
+
+void JSGenericLowering::LowerJSGeneratorRestoreRegister(Node* node) {
+  UNREACHABLE();  // Eliminated in typed lowering.
+}
 
 void JSGenericLowering::LowerJSStackCheck(Node* node) {
   Node* effect = NodeProperties::GetEffectInput(node);
diff --git a/src/compiler/js-global-object-specialization.cc b/src/compiler/js-global-object-specialization.cc
index 81ea1ad..31407e8 100644
--- a/src/compiler/js-global-object-specialization.cc
+++ b/src/compiler/js-global-object-specialization.cc
@@ -12,7 +12,7 @@
 #include "src/compiler/node-properties.h"
 #include "src/compiler/simplified-operator.h"
 #include "src/lookup.h"
-#include "src/objects-inl.h"  // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/objects-inl.h"
 #include "src/type-cache.h"
 
 namespace v8 {
@@ -131,9 +131,9 @@
   DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
   Handle<Name> name = StoreGlobalParametersOf(node->op()).name();
   Node* value = NodeProperties::GetValueInput(node, 0);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
+  Node* frame_state = NodeProperties::FindFrameStateBefore(node);
 
   // Retrieve the global object from the given {node}.
   Handle<JSGlobalObject> global_object;
@@ -173,8 +173,8 @@
       Node* check =
           graph()->NewNode(simplified()->ReferenceEqual(Type::Tagged()), value,
                            jsgraph()->Constant(property_cell_value));
-      control = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                 frame_state, effect, control);
+      control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                          frame_state, effect, control);
       break;
     }
     case PropertyCellType::kConstantType: {
@@ -185,8 +185,8 @@
       Type* property_cell_value_type = Type::TaggedSigned();
       if (property_cell_value->IsHeapObject()) {
         // Deoptimize if the {value} is a Smi.
-        control = graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
-                                   effect, control);
+        control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
+                                            frame_state, effect, control);
 
         // Load the {value} map check against the {property_cell} map.
         Node* value_map = effect =
@@ -199,8 +199,8 @@
             jsgraph()->HeapConstant(property_cell_value_map));
         property_cell_value_type = Type::TaggedPointer();
       }
-      control = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                 frame_state, effect, control);
+      control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                          frame_state, effect, control);
       effect = graph()->NewNode(
           simplified()->StoreField(
               AccessBuilder::ForPropertyCellValue(property_cell_value_type)),
diff --git a/src/compiler/js-graph.cc b/src/compiler/js-graph.cc
index 229169f..3f20daa 100644
--- a/src/compiler/js-graph.cc
+++ b/src/compiler/js-graph.cc
@@ -24,6 +24,11 @@
                 HeapConstant(isolate()->builtins()->AllocateInOldSpace()));
 }
 
+Node* JSGraph::ToNumberBuiltinConstant() {
+  return CACHED(kToNumberBuiltinConstant,
+                HeapConstant(isolate()->builtins()->ToNumber()));
+}
+
 Node* JSGraph::CEntryStubConstant(int result_size) {
   if (result_size == 1) {
     return CACHED(kCEntryStubConstant,
@@ -38,6 +43,11 @@
                 HeapConstant(factory()->empty_fixed_array()));
 }
 
+Node* JSGraph::EmptyLiteralsArrayConstant() {
+  return CACHED(kEmptyLiteralsArrayConstant,
+                HeapConstant(factory()->empty_literals_array()));
+}
+
 Node* JSGraph::HeapNumberMapConstant() {
   return CACHED(kHeapNumberMapConstant,
                 HeapConstant(factory()->heap_number_map()));
@@ -108,15 +118,15 @@
   // canonicalized node can be used.
   if (value->IsNumber()) {
     return Constant(value->Number());
-  } else if (value->IsUndefined()) {
+  } else if (value->IsUndefined(isolate())) {
     return UndefinedConstant();
-  } else if (value->IsTrue()) {
+  } else if (value->IsTrue(isolate())) {
     return TrueConstant();
-  } else if (value->IsFalse()) {
+  } else if (value->IsFalse(isolate())) {
     return FalseConstant();
-  } else if (value->IsNull()) {
+  } else if (value->IsNull(isolate())) {
     return NullConstant();
-  } else if (value->IsTheHole()) {
+  } else if (value->IsTheHole(isolate())) {
     return TheHoleConstant();
   } else {
     return HeapConstant(Handle<HeapObject>::cast(value));
@@ -156,7 +166,8 @@
 }
 
 Node* JSGraph::RelocatableInt32Constant(int32_t value, RelocInfo::Mode rmode) {
-  Node** loc = cache_.FindRelocatableInt32Constant(value);
+  Node** loc = cache_.FindRelocatableInt32Constant(
+      value, static_cast<RelocInfoMode>(rmode));
   if (*loc == nullptr) {
     *loc = graph()->NewNode(common()->RelocatableInt32Constant(value, rmode));
   }
@@ -164,7 +175,8 @@
 }
 
 Node* JSGraph::RelocatableInt64Constant(int64_t value, RelocInfo::Mode rmode) {
-  Node** loc = cache_.FindRelocatableInt64Constant(value);
+  Node** loc = cache_.FindRelocatableInt64Constant(
+      value, static_cast<RelocInfoMode>(rmode));
   if (*loc == nullptr) {
     *loc = graph()->NewNode(common()->RelocatableInt64Constant(value, rmode));
   }
@@ -218,22 +230,10 @@
   return ExternalConstant(ExternalReference(function_id, isolate()));
 }
 
-
-Node* JSGraph::EmptyFrameState() {
-  Node* empty_frame_state = cached_nodes_[kEmptyFrameState];
-  if (!empty_frame_state || empty_frame_state->IsDead()) {
-    Node* state_values = graph()->NewNode(common()->StateValues(0));
-    empty_frame_state = graph()->NewNode(
-        common()->FrameState(BailoutId::None(),
-                             OutputFrameStateCombine::Ignore(), nullptr),
-        state_values, state_values, state_values, NoContextConstant(),
-        UndefinedConstant(), graph()->start());
-    cached_nodes_[kEmptyFrameState] = empty_frame_state;
-  }
-  return empty_frame_state;
+Node* JSGraph::EmptyStateValues() {
+  return CACHED(kEmptyStateValues, graph()->NewNode(common()->StateValues(0)));
 }
 
-
 Node* JSGraph::Dead() {
   return CACHED(kDead, graph()->NewNode(common()->Dead()));
 }
diff --git a/src/compiler/js-graph.h b/src/compiler/js-graph.h
index e772da8..fe5545a 100644
--- a/src/compiler/js-graph.h
+++ b/src/compiler/js-graph.h
@@ -41,8 +41,10 @@
   // Canonicalized global constants.
   Node* AllocateInNewSpaceStubConstant();
   Node* AllocateInOldSpaceStubConstant();
+  Node* ToNumberBuiltinConstant();
   Node* CEntryStubConstant(int result_size);
   Node* EmptyFixedArrayConstant();
+  Node* EmptyLiteralsArrayConstant();
   Node* HeapNumberMapConstant();
   Node* OptimizedOutConstant();
   Node* StaleRegisterConstant();
@@ -123,9 +125,9 @@
   // stubs and runtime functions that do not require a context.
   Node* NoContextConstant() { return ZeroConstant(); }
 
-  // Creates an empty frame states for cases where we know that a function
-  // cannot deopt.
-  Node* EmptyFrameState();
+  // Creates an empty StateValues node, used when we don't have any concrete
+  // values for a certain part of the frame state.
+  Node* EmptyStateValues();
 
   // Create a control node that serves as dependency for dead nodes.
   Node* Dead();
@@ -145,8 +147,10 @@
   enum CachedNode {
     kAllocateInNewSpaceStubConstant,
     kAllocateInOldSpaceStubConstant,
+    kToNumberBuiltinConstant,
     kCEntryStubConstant,
     kEmptyFixedArrayConstant,
+    kEmptyLiteralsArrayConstant,
     kHeapNumberMapConstant,
     kOptimizedOutConstant,
     kStaleRegisterConstant,
@@ -158,7 +162,7 @@
     kZeroConstant,
     kOneConstant,
     kNaNConstant,
-    kEmptyFrameState,
+    kEmptyStateValues,
     kDead,
     kNumCachedNodes  // Must remain last.
   };
diff --git a/src/compiler/js-inlining-heuristic.cc b/src/compiler/js-inlining-heuristic.cc
index 0e0508b..0118b92 100644
--- a/src/compiler/js-inlining-heuristic.cc
+++ b/src/compiler/js-inlining-heuristic.cc
@@ -75,13 +75,24 @@
 
   // Gather feedback on how often this call site has been hit before.
   int calls = -1;  // Same default as CallICNexus::ExtractCallCount.
-  // TODO(turbofan): We also want call counts for constructor calls.
   if (node->opcode() == IrOpcode::kJSCallFunction) {
     CallFunctionParameters p = CallFunctionParametersOf(node->op());
     if (p.feedback().IsValid()) {
       CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
       calls = nexus.ExtractCallCount();
     }
+  } else {
+    DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
+    CallConstructParameters p = CallConstructParametersOf(node->op());
+    if (p.feedback().IsValid()) {
+      int const extra_index =
+          p.feedback().vector()->GetIndex(p.feedback().slot()) + 1;
+      Handle<Object> feedback_extra(p.feedback().vector()->get(extra_index),
+                                    function->GetIsolate());
+      if (feedback_extra->IsSmi()) {
+        calls = Handle<Smi>::cast(feedback_extra)->value();
+      }
+    }
   }
 
   // ---------------------------------------------------------------------------
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
index 5c01ff3..0664105 100644
--- a/src/compiler/js-inlining.cc
+++ b/src/compiler/js-inlining.cc
@@ -4,18 +4,19 @@
 
 #include "src/compiler/js-inlining.h"
 
-#include "src/ast/ast.h"
 #include "src/ast/ast-numbering.h"
+#include "src/ast/ast.h"
 #include "src/ast/scopes.h"
 #include "src/compiler.h"
-#include "src/compiler/all-nodes.h"
 #include "src/compiler/ast-graph-builder.h"
+#include "src/compiler/ast-loop-assignment-analyzer.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/compiler/js-operator.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/operator-properties.h"
+#include "src/compiler/type-hint-analyzer.h"
 #include "src/isolate-inl.h"
 #include "src/parsing/parser.h"
 #include "src/parsing/rewriter.h"
@@ -54,12 +55,8 @@
     return call_->InputAt(formal_arguments() + 1);
   }
 
-  Node* frame_state_before() {
-    return NodeProperties::GetFrameStateInput(call_, 1);
-  }
-
-  Node* frame_state_after() {
-    // Both, {JSCallFunction} and {JSCallConstruct}, have frame state after.
+  Node* frame_state() {
+    // Both, {JSCallFunction} and {JSCallConstruct}, have frame state.
     return NodeProperties::GetFrameStateInput(call_, 0);
   }
 
@@ -75,63 +72,6 @@
 };
 
 
-class CopyVisitor {
- public:
-  CopyVisitor(Graph* source_graph, Graph* target_graph, Zone* temp_zone)
-      : sentinel_op_(IrOpcode::kDead, Operator::kNoProperties, "Sentinel", 0, 0,
-                     0, 0, 0, 0),
-        sentinel_(target_graph->NewNode(&sentinel_op_)),
-        copies_(source_graph->NodeCount(), sentinel_, temp_zone),
-        source_graph_(source_graph),
-        target_graph_(target_graph),
-        temp_zone_(temp_zone) {}
-
-  Node* GetCopy(Node* orig) { return copies_[orig->id()]; }
-
-  void CopyGraph() {
-    NodeVector inputs(temp_zone_);
-    // TODO(bmeurer): AllNodes should be turned into something like
-    // Graph::CollectNodesReachableFromEnd() and the gray set stuff should be
-    // removed since it's only needed by the visualizer.
-    AllNodes all(temp_zone_, source_graph_);
-    // Copy all nodes reachable from end.
-    for (Node* orig : all.live) {
-      Node* copy = GetCopy(orig);
-      if (copy != sentinel_) {
-        // Mapping already exists.
-        continue;
-      }
-      // Copy the node.
-      inputs.clear();
-      for (Node* input : orig->inputs()) inputs.push_back(copies_[input->id()]);
-      copy = target_graph_->NewNode(orig->op(), orig->InputCount(),
-                                    inputs.empty() ? nullptr : &inputs[0]);
-      copies_[orig->id()] = copy;
-    }
-    // For missing inputs.
-    for (Node* orig : all.live) {
-      Node* copy = copies_[orig->id()];
-      for (int i = 0; i < copy->InputCount(); ++i) {
-        Node* input = copy->InputAt(i);
-        if (input == sentinel_) {
-          copy->ReplaceInput(i, GetCopy(orig->InputAt(i)));
-        }
-      }
-    }
-  }
-
-  const NodeVector& copies() const { return copies_; }
-
- private:
-  Operator const sentinel_op_;
-  Node* const sentinel_;
-  NodeVector copies_;
-  Graph* const source_graph_;
-  Graph* const target_graph_;
-  Zone* const temp_zone_;
-};
-
-
 Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
                                 Node* frame_state, Node* start, Node* end) {
   // The scheduler is smart enough to place our code; we just ensure {control}
@@ -390,7 +330,7 @@
   // TODO(turbofan): TranslatedState::GetAdaptedArguments() currently relies on
   // not inlining recursive functions. We might want to relax that at some
   // point.
-  for (Node* frame_state = call.frame_state_after();
+  for (Node* frame_state = call.frame_state();
        frame_state->opcode() == IrOpcode::kFrameState;
        frame_state = frame_state->InputAt(kFrameStateOuterStateInput)) {
     FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
@@ -416,6 +356,7 @@
   ParseInfo parse_info(&zone, function);
   CompilationInfo info(&parse_info, function);
   if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
+  if (info_->is_type_feedback_enabled()) info.MarkAsTypeFeedbackEnabled();
 
   if (!Compiler::ParseAndAnalyze(info.parse_info())) {
     TRACE("Not inlining %s into %s because parsing failed\n",
@@ -433,6 +374,7 @@
           info_->shared_info()->DebugName()->ToCString().get());
     return NoChange();
   }
+
   // Remember that we inlined this function. This needs to be called right
   // after we ensure deoptimization support so that the code flusher
   // does not remove the code with the deoptimization support.
@@ -446,59 +388,75 @@
         shared_info->DebugName()->ToCString().get(),
         info_->shared_info()->DebugName()->ToCString().get());
 
-  // TODO(mstarzinger): We could use the temporary zone for the graph because
-  // nodes are copied. This however leads to Zone-Types being allocated in the
-  // wrong zone and makes the engine explode at high speeds. Explosion bad!
-  Graph graph(jsgraph_->zone());
-  JSGraph jsgraph(info.isolate(), &graph, jsgraph_->common(),
-                  jsgraph_->javascript(), jsgraph_->simplified(),
-                  jsgraph_->machine());
-  AstGraphBuilder graph_builder(local_zone_, &info, &jsgraph);
-  graph_builder.CreateGraph(false);
+  // If function was lazily compiled, its literals array may not yet be set up.
+  JSFunction::EnsureLiterals(function);
 
-  CopyVisitor visitor(&graph, jsgraph_->graph(), &zone);
-  visitor.CopyGraph();
+  // Create the subgraph for the inlinee.
+  Node* start;
+  Node* end;
+  {
+    // Run the loop assignment analyzer on the inlinee.
+    AstLoopAssignmentAnalyzer loop_assignment_analyzer(&zone, &info);
+    LoopAssignmentAnalysis* loop_assignment =
+        loop_assignment_analyzer.Analyze();
 
-  Node* start = visitor.GetCopy(graph.start());
-  Node* end = visitor.GetCopy(graph.end());
-  Node* frame_state = call.frame_state_after();
-  Node* new_target = jsgraph_->UndefinedConstant();
+    // Run the type hint analyzer on the inlinee.
+    TypeHintAnalyzer type_hint_analyzer(&zone);
+    TypeHintAnalysis* type_hint_analysis =
+        type_hint_analyzer.Analyze(handle(shared_info->code(), info.isolate()));
 
-  // Insert nodes around the call that model the behavior required for a
-  // constructor dispatch (allocate implicit receiver and check return value).
-  // This models the behavior usually accomplished by our {JSConstructStub}.
-  // Note that the context has to be the callers context (input to call node).
-  Node* receiver = jsgraph_->UndefinedConstant();  // Implicit receiver.
-  if (node->opcode() == IrOpcode::kJSCallConstruct &&
-      NeedsImplicitReceiver(shared_info)) {
-    Node* effect = NodeProperties::GetEffectInput(node);
-    Node* context = NodeProperties::GetContextInput(node);
-    Node* create = jsgraph_->graph()->NewNode(
-        jsgraph_->javascript()->Create(), call.target(), call.new_target(),
-        context, call.frame_state_before(), effect);
-    NodeProperties::ReplaceEffectInput(node, create);
-    // Insert a check of the return value to determine whether the return value
-    // or the implicit receiver should be selected as a result of the call.
-    Node* check = jsgraph_->graph()->NewNode(
-        jsgraph_->javascript()->CallRuntime(Runtime::kInlineIsJSReceiver, 1),
-        node, context, node, start);
-    Node* select = jsgraph_->graph()->NewNode(
-        jsgraph_->common()->Select(MachineRepresentation::kTagged), check, node,
-        create);
-    NodeProperties::ReplaceUses(node, select, check, node, node);
-    NodeProperties::ReplaceValueInput(select, node, 1);
-    NodeProperties::ReplaceValueInput(check, node, 0);
-    NodeProperties::ReplaceEffectInput(check, node);
-    receiver = create;  // The implicit receiver.
+    // Run the AstGraphBuilder to create the subgraph.
+    Graph::SubgraphScope scope(graph());
+    AstGraphBuilder graph_builder(&zone, &info, jsgraph(), loop_assignment,
+                                  type_hint_analysis);
+    graph_builder.CreateGraph(false);
+
+    // Extract the inlinee start/end nodes.
+    start = graph()->start();
+    end = graph()->end();
   }
 
-  // Swizzle the inputs of the {JSCallConstruct} node to look like inputs to a
-  // normal {JSCallFunction} node so that the rest of the inlining machinery
-  // behaves as if we were dealing with a regular function invocation.
+  Node* frame_state = call.frame_state();
+  Node* new_target = jsgraph_->UndefinedConstant();
+
+  // Inline {JSCallConstruct} requires some additional magic.
   if (node->opcode() == IrOpcode::kJSCallConstruct) {
+    // Insert nodes around the call that model the behavior required for a
+    // constructor dispatch (allocate implicit receiver and check return value).
+    // This models the behavior usually accomplished by our {JSConstructStub}.
+    // Note that the context has to be the callers context (input to call node).
+    Node* receiver = jsgraph_->UndefinedConstant();  // Implicit receiver.
+    if (NeedsImplicitReceiver(shared_info)) {
+      Node* frame_state_before = NodeProperties::FindFrameStateBefore(node);
+      Node* effect = NodeProperties::GetEffectInput(node);
+      Node* context = NodeProperties::GetContextInput(node);
+      Node* create = jsgraph_->graph()->NewNode(
+          jsgraph_->javascript()->Create(), call.target(), call.new_target(),
+          context, frame_state_before, effect);
+      NodeProperties::ReplaceEffectInput(node, create);
+      // Insert a check of the return value to determine whether the return
+      // value or the implicit receiver should be selected as a result of the
+      // call.
+      Node* check = jsgraph_->graph()->NewNode(
+          jsgraph_->javascript()->CallRuntime(Runtime::kInlineIsJSReceiver, 1),
+          node, context, node, start);
+      Node* select = jsgraph_->graph()->NewNode(
+          jsgraph_->common()->Select(MachineRepresentation::kTagged), check,
+          node, create);
+      NodeProperties::ReplaceUses(node, select, check, node, node);
+      NodeProperties::ReplaceValueInput(select, node, 1);
+      NodeProperties::ReplaceValueInput(check, node, 0);
+      NodeProperties::ReplaceEffectInput(check, node);
+      receiver = create;  // The implicit receiver.
+    }
+
+    // Swizzle the inputs of the {JSCallConstruct} node to look like inputs to a
+    // normal {JSCallFunction} node so that the rest of the inlining machinery
+    // behaves as if we were dealing with a regular function invocation.
     new_target = call.new_target();  // Retrieve new target value input.
     node->RemoveInput(call.formal_arguments() + 1);  // Drop new target.
     node->InsertInput(jsgraph_->graph()->zone(), 1, receiver);
+
     // Insert a construct stub frame into the chain of frame states. This will
     // reconstruct the proper frame when deoptimizing within the constructor.
     frame_state = CreateArtificialFrameState(
@@ -521,10 +479,11 @@
   if (node->opcode() == IrOpcode::kJSCallFunction &&
       is_sloppy(parse_info.language_mode()) && !shared_info->native()) {
     const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
+    Node* frame_state_before = NodeProperties::FindFrameStateBefore(node);
     Node* effect = NodeProperties::GetEffectInput(node);
     Node* convert = jsgraph_->graph()->NewNode(
         jsgraph_->javascript()->ConvertReceiver(p.convert_mode()),
-        call.receiver(), context, call.frame_state_before(), effect, start);
+        call.receiver(), context, frame_state_before, effect, start);
     NodeProperties::ReplaceValueInput(node, convert, 1);
     NodeProperties::ReplaceEffectInput(node, convert);
   }
@@ -558,6 +517,8 @@
   return InlineCall(node, new_target, context, frame_state, start, end);
 }
 
+Graph* JSInliner::graph() const { return jsgraph()->graph(); }
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/js-inlining.h b/src/compiler/js-inlining.h
index d0ab7c0..88cbf89 100644
--- a/src/compiler/js-inlining.h
+++ b/src/compiler/js-inlining.h
@@ -36,9 +36,12 @@
   Reduction ReduceJSCall(Node* node, Handle<JSFunction> function);
 
  private:
-  Zone* local_zone_;
+  Graph* graph() const;
+  JSGraph* jsgraph() const { return jsgraph_; }
+
+  Zone* const local_zone_;
   CompilationInfo* info_;
-  JSGraph* jsgraph_;
+  JSGraph* const jsgraph_;
 
   Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
                                    int parameter_count,
diff --git a/src/compiler/js-intrinsic-lowering.cc b/src/compiler/js-intrinsic-lowering.cc
index 70bcda5..8d24013 100644
--- a/src/compiler/js-intrinsic-lowering.cc
+++ b/src/compiler/js-intrinsic-lowering.cc
@@ -30,8 +30,6 @@
       Runtime::FunctionForId(CallRuntimeParametersOf(node->op()).id());
   if (f->intrinsic_type != Runtime::IntrinsicType::INLINE) return NoChange();
   switch (f->function_id) {
-    case Runtime::kInlineConstructDouble:
-      return ReduceConstructDouble(node);
     case Runtime::kInlineCreateIterResultObject:
       return ReduceCreateIterResultObject(node);
     case Runtime::kInlineDeoptimizeNow:
@@ -40,6 +38,12 @@
       return ReduceDoubleHi(node);
     case Runtime::kInlineDoubleLo:
       return ReduceDoubleLo(node);
+    case Runtime::kInlineGeneratorClose:
+      return ReduceGeneratorClose(node);
+    case Runtime::kInlineGeneratorGetInputOrDebugPos:
+      return ReduceGeneratorGetInputOrDebugPos(node);
+    case Runtime::kInlineGeneratorGetResumeMode:
+      return ReduceGeneratorGetResumeMode(node);
     case Runtime::kInlineIsArray:
       return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
     case Runtime::kInlineIsTypedArray:
@@ -103,19 +107,6 @@
 }
 
 
-Reduction JSIntrinsicLowering::ReduceConstructDouble(Node* node) {
-  Node* high = NodeProperties::GetValueInput(node, 0);
-  Node* low = NodeProperties::GetValueInput(node, 1);
-  Node* value =
-      graph()->NewNode(machine()->Float64InsertHighWord32(),
-                       graph()->NewNode(machine()->Float64InsertLowWord32(),
-                                        jsgraph()->Constant(0), low),
-                       high);
-  ReplaceWithValue(node, value);
-  return Replace(value);
-}
-
-
 Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
   if (mode() != kDeoptimizationEnabled) return NoChange();
   Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
@@ -152,6 +143,39 @@
   return Change(node, machine()->Float64ExtractLowWord32());
 }
 
+Reduction JSIntrinsicLowering::ReduceGeneratorClose(Node* node) {
+  Node* const generator = NodeProperties::GetValueInput(node, 0);
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  Node* const control = NodeProperties::GetControlInput(node);
+  Node* const closed = jsgraph()->Constant(JSGeneratorObject::kGeneratorClosed);
+  Node* const undefined = jsgraph()->UndefinedConstant();
+  Operator const* const op = simplified()->StoreField(
+      AccessBuilder::ForJSGeneratorObjectContinuation());
+
+  ReplaceWithValue(node, undefined, node);
+  NodeProperties::RemoveType(node);
+  return Change(node, op, generator, closed, effect, control);
+}
+
+Reduction JSIntrinsicLowering::ReduceGeneratorGetInputOrDebugPos(Node* node) {
+  Node* const generator = NodeProperties::GetValueInput(node, 0);
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  Node* const control = NodeProperties::GetControlInput(node);
+  Operator const* const op = simplified()->LoadField(
+      AccessBuilder::ForJSGeneratorObjectInputOrDebugPos());
+
+  return Change(node, op, generator, effect, control);
+}
+
+Reduction JSIntrinsicLowering::ReduceGeneratorGetResumeMode(Node* node) {
+  Node* const generator = NodeProperties::GetValueInput(node, 0);
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  Node* const control = NodeProperties::GetControlInput(node);
+  Operator const* const op =
+      simplified()->LoadField(AccessBuilder::ForJSGeneratorObjectResumeMode());
+
+  return Change(node, op, generator, effect, control);
+}
 
 Reduction JSIntrinsicLowering::ReduceIsInstanceType(
     Node* node, InstanceType instance_type) {
diff --git a/src/compiler/js-intrinsic-lowering.h b/src/compiler/js-intrinsic-lowering.h
index 59e6f49..f4b8695 100644
--- a/src/compiler/js-intrinsic-lowering.h
+++ b/src/compiler/js-intrinsic-lowering.h
@@ -37,11 +37,13 @@
   Reduction Reduce(Node* node) final;
 
  private:
-  Reduction ReduceConstructDouble(Node* node);
   Reduction ReduceCreateIterResultObject(Node* node);
   Reduction ReduceDeoptimizeNow(Node* node);
   Reduction ReduceDoubleHi(Node* node);
   Reduction ReduceDoubleLo(Node* node);
+  Reduction ReduceGeneratorClose(Node* node);
+  Reduction ReduceGeneratorGetInputOrDebugPos(Node* node);
+  Reduction ReduceGeneratorGetResumeMode(Node* node);
   Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
   Reduction ReduceIsJSReceiver(Node* node);
   Reduction ReduceIsSmi(Node* node);
diff --git a/src/compiler/js-native-context-specialization.cc b/src/compiler/js-native-context-specialization.cc
index fbc064c..81d4cd0 100644
--- a/src/compiler/js-native-context-specialization.cc
+++ b/src/compiler/js-native-context-specialization.cc
@@ -15,7 +15,6 @@
 #include "src/compiler/node-matchers.h"
 #include "src/field-index-inl.h"
 #include "src/isolate-inl.h"
-#include "src/objects-inl.h"  // TODO(mstarzinger): Temporary cycle breaker!
 #include "src/type-cache.h"
 #include "src/type-feedback-vector.h"
 
@@ -79,9 +78,9 @@
          node->opcode() == IrOpcode::kJSLoadProperty ||
          node->opcode() == IrOpcode::kJSStoreProperty);
   Node* receiver = NodeProperties::GetValueInput(node, 0);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
+  Node* frame_state = NodeProperties::FindFrameStateBefore(node);
 
   // Not much we can do if deoptimization support is disabled.
   if (!(flags() & kDeoptimizationEnabled)) return NoChange();
@@ -112,8 +111,8 @@
   if (index != nullptr) {
     Node* check = graph()->NewNode(simplified()->ReferenceEqual(Type::Name()),
                                    index, jsgraph()->HeapConstant(name));
-    control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
-                               effect, control);
+    control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                        frame_state, effect, control);
   }
 
   // Check if {receiver} may be a number.
@@ -126,17 +125,17 @@
   }
 
   // Ensure that {receiver} is a heap object.
-  Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
   Node* receiverissmi_control = nullptr;
   Node* receiverissmi_effect = effect;
   if (receiverissmi_possible) {
+    Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
     Node* branch = graph()->NewNode(common()->Branch(), check, control);
     control = graph()->NewNode(common()->IfFalse(), branch);
     receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
     receiverissmi_effect = effect;
   } else {
-    control = graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
-                               effect, control);
+    receiver = effect = graph()->NewNode(simplified()->CheckTaggedPointer(),
+                                         receiver, effect, control);
   }
 
   // Load the {receiver} map. The resulting effect is the dominating effect for
@@ -159,7 +158,7 @@
     if (receiver_type->Is(Type::String())) {
       Node* check = graph()->NewNode(simplified()->ObjectIsString(), receiver);
       if (j == access_infos.size() - 1) {
-        this_control =
+        this_control = this_effect =
             graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
                              this_effect, fallthrough_control);
         fallthrough_control = nullptr;
@@ -182,10 +181,11 @@
             graph()->NewNode(simplified()->ReferenceEqual(Type::Internal()),
                              receiver_map, jsgraph()->Constant(map));
         if (--num_classes == 0 && j == access_infos.size() - 1) {
-          this_controls.push_back(
+          Node* deoptimize =
               graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
-                               this_effect, fallthrough_control));
-          this_effects.push_back(this_effect);
+                               this_effect, fallthrough_control);
+          this_controls.push_back(deoptimize);
+          this_effects.push_back(deoptimize);
           fallthrough_control = nullptr;
         } else {
           Node* branch =
@@ -237,38 +237,14 @@
       if (access_mode == AccessMode::kStore) {
         Node* check = graph()->NewNode(
             simplified()->ReferenceEqual(Type::Tagged()), value, this_value);
-        this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                        frame_state, this_effect, this_control);
+        this_control = this_effect =
+            graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+                             this_effect, this_control);
       }
     } else {
       DCHECK(access_info.IsDataField());
       FieldIndex const field_index = access_info.field_index();
-      FieldCheck const field_check = access_info.field_check();
       Type* const field_type = access_info.field_type();
-      switch (field_check) {
-        case FieldCheck::kNone:
-          break;
-        case FieldCheck::kJSArrayBufferViewBufferNotNeutered: {
-          Node* this_buffer = this_effect =
-              graph()->NewNode(simplified()->LoadField(
-                                   AccessBuilder::ForJSArrayBufferViewBuffer()),
-                               this_receiver, this_effect, this_control);
-          Node* this_buffer_bit_field = this_effect =
-              graph()->NewNode(simplified()->LoadField(
-                                   AccessBuilder::ForJSArrayBufferBitField()),
-                               this_buffer, this_effect, this_control);
-          Node* check = graph()->NewNode(
-              machine()->Word32Equal(),
-              graph()->NewNode(machine()->Word32And(), this_buffer_bit_field,
-                               jsgraph()->Int32Constant(
-                                   1 << JSArrayBuffer::WasNeutered::kShift)),
-              jsgraph()->Int32Constant(0));
-          this_control =
-              graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
-                               this_effect, this_control);
-          break;
-        }
-      }
       if (access_mode == AccessMode::kLoad &&
           access_info.holder().ToHandle(&holder)) {
         this_receiver = jsgraph()->Constant(holder);
@@ -284,6 +260,11 @@
           field_type,  MachineType::AnyTagged(), kFullWriteBarrier};
       if (access_mode == AccessMode::kLoad) {
         if (field_type->Is(Type::UntaggedFloat64())) {
+          // TODO(turbofan): We remove the representation axis from the type to
+          // avoid uninhabited representation types. This is a workaround until
+          // the {PropertyAccessInfo} is using {MachineRepresentation} instead.
+          field_access.type = Type::Union(
+              field_type, Type::Representation(Type::Number(), zone()), zone());
           if (!field_index.is_inobject() || field_index.is_hidden_field() ||
               !FLAG_unbox_double_fields) {
             this_storage = this_effect =
@@ -300,9 +281,14 @@
       } else {
         DCHECK_EQ(AccessMode::kStore, access_mode);
         if (field_type->Is(Type::UntaggedFloat64())) {
+          // TODO(turbofan): We remove the representation axis from the type to
+          // avoid uninhabited representation types. This is a workaround until
+          // the {PropertyAccessInfo} is using {MachineRepresentation} instead.
+          field_access.type = Type::Union(
+              field_type, Type::Representation(Type::Number(), zone()), zone());
           Node* check =
               graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
-          this_control =
+          this_control = this_effect =
               graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
                                this_effect, this_control);
           this_value = graph()->NewNode(simplified()->TypeGuard(Type::Number()),
@@ -312,8 +298,9 @@
               !FLAG_unbox_double_fields) {
             if (access_info.HasTransitionMap()) {
               // Allocate a MutableHeapNumber for the new property.
-              this_effect =
-                  graph()->NewNode(common()->BeginRegion(), this_effect);
+              this_effect = graph()->NewNode(
+                  common()->BeginRegion(RegionObservability::kNotObservable),
+                  this_effect);
               Node* this_box = this_effect =
                   graph()->NewNode(simplified()->Allocate(NOT_TENURED),
                                    jsgraph()->Constant(HeapNumber::kSize),
@@ -343,19 +330,12 @@
             field_access.machine_type = MachineType::Float64();
           }
         } else if (field_type->Is(Type::TaggedSigned())) {
-          Node* check =
-              graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
-          this_control =
-              graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+          this_value = this_effect =
+              graph()->NewNode(simplified()->CheckTaggedSigned(), this_value,
                                this_effect, this_control);
-          this_value =
-              graph()->NewNode(simplified()->TypeGuard(type_cache_.kSmi),
-                               this_value, this_control);
         } else if (field_type->Is(Type::TaggedPointer())) {
-          Node* check =
-              graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
-          this_control =
-              graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+          this_value = this_effect =
+              graph()->NewNode(simplified()->CheckTaggedPointer(), this_value,
                                this_effect, this_control);
           if (field_type->NumClasses() == 1) {
             // Emit a map check for the value.
@@ -365,7 +345,7 @@
             Node* check = graph()->NewNode(
                 simplified()->ReferenceEqual(Type::Internal()), this_value_map,
                 jsgraph()->Constant(field_type->Classes().Current()));
-            this_control =
+            this_control = this_effect =
                 graph()->NewNode(common()->DeoptimizeUnless(), check,
                                  frame_state, this_effect, this_control);
           } else {
@@ -376,7 +356,9 @@
         }
         Handle<Map> transition_map;
         if (access_info.transition_map().ToHandle(&transition_map)) {
-          this_effect = graph()->NewNode(common()->BeginRegion(), this_effect);
+          this_effect = graph()->NewNode(
+              common()->BeginRegion(RegionObservability::kObservable),
+              this_effect);
           this_effect = graph()->NewNode(
               simplified()->StoreField(AccessBuilder::ForMap()), this_receiver,
               jsgraph()->Constant(transition_map), this_effect, this_control);
@@ -522,9 +504,9 @@
          node->opcode() == IrOpcode::kJSStoreProperty);
   Node* receiver = NodeProperties::GetValueInput(node, 0);
   Node* context = NodeProperties::GetContextInput(node);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
+  Node* frame_state = NodeProperties::FindFrameStateBefore(node);
 
   // Not much we can do if deoptimization support is disabled.
   if (!(flags() & kDeoptimizationEnabled)) return NoChange();
@@ -555,9 +537,8 @@
   ZoneVector<Node*> controls(zone());
 
   // Ensure that {receiver} is a heap object.
-  Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
-  control = graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
-                             effect, control);
+  receiver = effect = graph()->NewNode(simplified()->CheckTaggedPointer(),
+                                       receiver, effect, control);
 
   // Load the {receiver} map. The resulting effect is the dominating effect for
   // all (polymorphic) branches.
@@ -597,17 +578,19 @@
           // TODO(turbofan): This is ugly as hell! We should probably introduce
           // macro-ish operators for property access that encapsulate this whole
           // mess.
-          this_controls.push_back(graph()->NewNode(common()->DeoptimizeUnless(),
-                                                   check, frame_state, effect,
-                                                   fallthrough_control));
+          Node* deoptimize =
+              graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+                               effect, fallthrough_control);
+          this_controls.push_back(deoptimize);
+          this_effects.push_back(deoptimize);
           fallthrough_control = nullptr;
         } else {
           Node* branch =
               graph()->NewNode(common()->Branch(), check, fallthrough_control);
           this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+          this_effects.push_back(effect);
           fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
         }
-        this_effects.push_back(effect);
         if (!map->IsJSArrayMap()) receiver_is_jsarray = false;
       }
 
@@ -624,7 +607,7 @@
             simplified()->ReferenceEqual(Type::Any()), receiver_map,
             jsgraph()->HeapConstant(transition_source));
         if (--num_transitions == 0 && j == access_infos.size() - 1) {
-          transition_control =
+          transition_control = transition_effect =
               graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
                                transition_effect, fallthrough_control);
           fallthrough_control = nullptr;
@@ -647,8 +630,7 @@
           // Instance migration, let the stub deal with the {receiver}.
           TransitionElementsKindStub stub(isolate(),
                                           transition_source->elements_kind(),
-                                          transition_target->elements_kind(),
-                                          transition_source->IsJSArrayMap());
+                                          transition_target->elements_kind());
           CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
               isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 0,
               CallDescriptor::kNeedsFrameState, node->op()->properties());
@@ -657,6 +639,7 @@
               receiver, jsgraph()->HeapConstant(transition_target), context,
               frame_state, transition_effect, transition_control);
         }
+
         this_controls.push_back(transition_control);
         this_effects.push_back(transition_effect);
       }
@@ -675,6 +658,14 @@
             graph()->NewNode(common()->EffectPhi(this_control_count),
                              this_control_count + 1, &this_effects.front());
       }
+
+      // TODO(turbofan): The effect/control linearization will not find a
+      // FrameState after the StoreField or Call that is generated for the
+      // elements kind transition above. This is because those operators
+      // don't have the kNoWrite flag on it, even though they are not
+      // observable by JavaScript.
+      this_effect = graph()->NewNode(common()->Checkpoint(), frame_state,
+                                     this_effect, this_control);
     }
 
     // Certain stores need a prototype chain check because shape changes
@@ -685,28 +676,6 @@
       AssumePrototypesStable(receiver_type, native_context, holder);
     }
 
-    // Check that the {index} is actually a Number.
-    if (!NumberMatcher(this_index).HasValue()) {
-      Node* check =
-          graph()->NewNode(simplified()->ObjectIsNumber(), this_index);
-      this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                      frame_state, this_effect, this_control);
-      this_index = graph()->NewNode(simplified()->TypeGuard(Type::Number()),
-                                    this_index, this_control);
-    }
-
-    // Convert the {index} to an unsigned32 value and check if the result is
-    // equal to the original {index}.
-    if (!NumberMatcher(this_index).IsInRange(0.0, kMaxUInt32)) {
-      Node* this_index32 =
-          graph()->NewNode(simplified()->NumberToUint32(), this_index);
-      Node* check = graph()->NewNode(simplified()->NumberEqual(), this_index32,
-                                     this_index);
-      this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                      frame_state, this_effect, this_control);
-      this_index = this_index32;
-    }
-
     // TODO(bmeurer): We currently specialize based on elements kind. We should
     // also be able to properly support strings and other JSObjects here.
     ElementsKind elements_kind = access_info.elements_kind();
@@ -725,8 +694,9 @@
       Node* check = graph()->NewNode(
           simplified()->ReferenceEqual(Type::Any()), this_elements_map,
           jsgraph()->HeapConstant(factory()->fixed_array_map()));
-      this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                      frame_state, this_effect, this_control);
+      this_control = this_effect =
+          graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+                           this_effect, this_control);
     }
 
     // Load the length of the {receiver}.
@@ -741,10 +711,9 @@
                   this_elements, this_effect, this_control);
 
     // Check that the {index} is in the valid range for the {receiver}.
-    Node* check = graph()->NewNode(simplified()->NumberLessThan(), this_index,
-                                   this_length);
-    this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                    frame_state, this_effect, this_control);
+    this_index = this_effect =
+        graph()->NewNode(simplified()->CheckBounds(), this_index, this_length,
+                         this_effect, this_control);
 
     // Compute the element access.
     Type* element_type = Type::Any();
@@ -781,45 +750,26 @@
       if (elements_kind == FAST_HOLEY_ELEMENTS ||
           elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
         // Perform the hole check on the result.
-        Node* check =
-            graph()->NewNode(simplified()->ReferenceEqual(element_access.type),
-                             this_value, jsgraph()->TheHoleConstant());
+        CheckTaggedHoleMode mode = CheckTaggedHoleMode::kNeverReturnHole;
         // Check if we are allowed to turn the hole into undefined.
         Type* initial_holey_array_type = Type::Class(
             handle(isolate()->get_initial_js_array_map(elements_kind)),
             graph()->zone());
         if (receiver_type->NowIs(initial_holey_array_type) &&
             isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
-          Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                          check, this_control);
-          Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-          Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
           // Add a code dependency on the array protector cell.
           AssumePrototypesStable(receiver_type, native_context,
                                  isolate()->initial_object_prototype());
           dependencies()->AssumePropertyCell(factory()->array_protector());
           // Turn the hole into undefined.
-          this_control =
-              graph()->NewNode(common()->Merge(2), if_true, if_false);
-          this_value = graph()->NewNode(
-              common()->Phi(MachineRepresentation::kTagged, 2),
-              jsgraph()->UndefinedConstant(), this_value, this_control);
-          element_type =
-              Type::Union(element_type, Type::Undefined(), graph()->zone());
-        } else {
-          // Deoptimize in case of the hole.
-          this_control =
-              graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
-                               this_effect, this_control);
+          mode = CheckTaggedHoleMode::kConvertHoleToUndefined;
         }
-        // Rename the result to represent the actual type (not polluted by the
-        // hole).
-        this_value = graph()->NewNode(simplified()->TypeGuard(element_type),
-                                      this_value, this_control);
+        this_value = this_effect =
+            graph()->NewNode(simplified()->CheckTaggedHole(mode), this_value,
+                             this_effect, this_control);
       } else if (elements_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
         // Perform the hole check on the result.
-        Node* check =
-            graph()->NewNode(simplified()->NumberIsHoleNaN(), this_value);
+        CheckFloat64HoleMode mode = CheckFloat64HoleMode::kNeverReturnHole;
         // Check if we are allowed to return the hole directly.
         Type* initial_holey_array_type = Type::Class(
             handle(isolate()->get_initial_js_array_map(elements_kind)),
@@ -830,33 +780,32 @@
           AssumePrototypesStable(receiver_type, native_context,
                                  isolate()->initial_object_prototype());
           dependencies()->AssumePropertyCell(factory()->array_protector());
-          // Turn the hole into undefined.
-          this_value = graph()->NewNode(
-              common()->Select(MachineRepresentation::kTagged,
-                               BranchHint::kFalse),
-              check, jsgraph()->UndefinedConstant(), this_value);
-        } else {
-          // Deoptimize in case of the hole.
-          this_control =
-              graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
-                               this_effect, this_control);
+          // Return the signaling NaN hole directly if all uses are truncating.
+          mode = CheckFloat64HoleMode::kAllowReturnHole;
         }
+        this_value = this_effect =
+            graph()->NewNode(simplified()->CheckFloat64Hole(mode), this_value,
+                             this_effect, this_control);
       }
     } else {
       DCHECK_EQ(AccessMode::kStore, access_mode);
       if (IsFastSmiElementsKind(elements_kind)) {
-        Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
-        this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                        frame_state, this_effect, this_control);
-        this_value = graph()->NewNode(simplified()->TypeGuard(type_cache_.kSmi),
-                                      this_value, this_control);
+        this_value = this_effect =
+            graph()->NewNode(simplified()->CheckTaggedSigned(), this_value,
+                             this_effect, this_control);
       } else if (IsFastDoubleElementsKind(elements_kind)) {
         Node* check =
             graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
-        this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                        frame_state, this_effect, this_control);
+        this_control = this_effect =
+            graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+                             this_effect, this_control);
         this_value = graph()->NewNode(simplified()->TypeGuard(Type::Number()),
                                       this_value, this_control);
+        // Make sure we do not store signalling NaNs into holey double arrays.
+        if (elements_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
+          this_value =
+              graph()->NewNode(simplified()->NumberSilenceNaN(), this_value);
+        }
       }
       this_effect = graph()->NewNode(simplified()->StoreElement(element_access),
                                      this_elements, this_index, this_value,
@@ -960,9 +909,9 @@
 
 
 Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(Node* node) {
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
+  Node* frame_state = NodeProperties::FindFrameStateBefore(node);
   Node* deoptimize =
       graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kSoft), frame_state,
                        effect, control);
diff --git a/src/compiler/js-operator.cc b/src/compiler/js-operator.cc
index dfbe742..89c0eee 100644
--- a/src/compiler/js-operator.cc
+++ b/src/compiler/js-operator.cc
@@ -9,8 +9,8 @@
 #include "src/base/lazy-instance.h"
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
-#include "src/objects-inl.h"  // TODO(mstarzinger): Temporary cycle breaker!
-#include "src/type-feedback-vector-inl.h"
+#include "src/handles-inl.h"
+#include "src/type-feedback-vector.h"
 
 namespace v8 {
 namespace internal {
@@ -376,34 +376,54 @@
   return OpParameter<CreateLiteralParameters>(op);
 }
 
-#define CACHED_OP_LIST(V)                                  \
-  V(Equal, Operator::kNoProperties, 2, 1)                  \
-  V(NotEqual, Operator::kNoProperties, 2, 1)               \
-  V(StrictEqual, Operator::kPure, 2, 1)                    \
-  V(StrictNotEqual, Operator::kPure, 2, 1)                 \
-  V(LessThan, Operator::kNoProperties, 2, 1)               \
-  V(GreaterThan, Operator::kNoProperties, 2, 1)            \
-  V(LessThanOrEqual, Operator::kNoProperties, 2, 1)        \
-  V(GreaterThanOrEqual, Operator::kNoProperties, 2, 1)     \
-  V(ToInteger, Operator::kNoProperties, 1, 1)              \
-  V(ToLength, Operator::kNoProperties, 1, 1)               \
-  V(ToName, Operator::kNoProperties, 1, 1)                 \
-  V(ToNumber, Operator::kNoProperties, 1, 1)               \
-  V(ToObject, Operator::kFoldable, 1, 1)                   \
-  V(ToString, Operator::kNoProperties, 1, 1)               \
-  V(Create, Operator::kEliminatable, 2, 1)                 \
-  V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
-  V(HasProperty, Operator::kNoProperties, 2, 1)            \
-  V(TypeOf, Operator::kPure, 1, 1)                         \
-  V(InstanceOf, Operator::kNoProperties, 2, 1)             \
-  V(ForInDone, Operator::kPure, 2, 1)                      \
-  V(ForInNext, Operator::kNoProperties, 4, 1)              \
-  V(ForInPrepare, Operator::kNoProperties, 1, 3)           \
-  V(ForInStep, Operator::kPure, 1, 1)                      \
-  V(LoadMessage, Operator::kNoThrow, 0, 1)                 \
-  V(StoreMessage, Operator::kNoThrow, 1, 0)                \
-  V(StackCheck, Operator::kNoProperties, 0, 0)             \
-  V(CreateWithContext, Operator::kNoProperties, 2, 1)      \
+const BinaryOperationHints& BinaryOperationHintsOf(const Operator* op) {
+  DCHECK(op->opcode() == IrOpcode::kJSBitwiseOr ||
+         op->opcode() == IrOpcode::kJSBitwiseXor ||
+         op->opcode() == IrOpcode::kJSBitwiseAnd ||
+         op->opcode() == IrOpcode::kJSShiftLeft ||
+         op->opcode() == IrOpcode::kJSShiftRight ||
+         op->opcode() == IrOpcode::kJSShiftRightLogical ||
+         op->opcode() == IrOpcode::kJSAdd ||
+         op->opcode() == IrOpcode::kJSSubtract ||
+         op->opcode() == IrOpcode::kJSMultiply ||
+         op->opcode() == IrOpcode::kJSDivide ||
+         op->opcode() == IrOpcode::kJSModulus);
+  return OpParameter<BinaryOperationHints>(op);
+}
+
+const CompareOperationHints& CompareOperationHintsOf(const Operator* op) {
+  DCHECK(op->opcode() == IrOpcode::kJSEqual ||
+         op->opcode() == IrOpcode::kJSNotEqual ||
+         op->opcode() == IrOpcode::kJSStrictEqual ||
+         op->opcode() == IrOpcode::kJSStrictNotEqual ||
+         op->opcode() == IrOpcode::kJSLessThan ||
+         op->opcode() == IrOpcode::kJSGreaterThan ||
+         op->opcode() == IrOpcode::kJSLessThanOrEqual ||
+         op->opcode() == IrOpcode::kJSGreaterThanOrEqual);
+  return OpParameter<CompareOperationHints>(op);
+}
+
+#define CACHED_OP_LIST(V)                                   \
+  V(ToInteger, Operator::kNoProperties, 1, 1)               \
+  V(ToLength, Operator::kNoProperties, 1, 1)                \
+  V(ToName, Operator::kNoProperties, 1, 1)                  \
+  V(ToNumber, Operator::kNoProperties, 1, 1)                \
+  V(ToObject, Operator::kFoldable, 1, 1)                    \
+  V(ToString, Operator::kNoProperties, 1, 1)                \
+  V(Create, Operator::kEliminatable, 2, 1)                  \
+  V(CreateIterResultObject, Operator::kEliminatable, 2, 1)  \
+  V(HasProperty, Operator::kNoProperties, 2, 1)             \
+  V(TypeOf, Operator::kPure, 1, 1)                          \
+  V(InstanceOf, Operator::kNoProperties, 2, 1)              \
+  V(ForInDone, Operator::kPure, 2, 1)                       \
+  V(ForInNext, Operator::kNoProperties, 4, 1)               \
+  V(ForInPrepare, Operator::kNoProperties, 1, 3)            \
+  V(ForInStep, Operator::kPure, 1, 1)                       \
+  V(LoadMessage, Operator::kNoThrow, 0, 1)                  \
+  V(StoreMessage, Operator::kNoThrow, 1, 0)                 \
+  V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
+  V(StackCheck, Operator::kNoProperties, 0, 0)              \
+  V(CreateWithContext, Operator::kNoProperties, 2, 1)       \
   V(CreateModuleContext, Operator::kNoProperties, 2, 1)
 
 struct JSOperatorGlobalCache final {
@@ -537,6 +557,79 @@
       hints);                                           // parameter
 }
 
+const Operator* JSOperatorBuilder::Equal(CompareOperationHints hints) {
+  // TODO(turbofan): Cache most important versions of this operator.
+  return new (zone()) Operator1<CompareOperationHints>(  //--
+      IrOpcode::kJSEqual, Operator::kNoProperties,       // opcode
+      "JSEqual",                                         // name
+      2, 1, 1, 1, 1, 2,                                  // inputs/outputs
+      hints);                                            // parameter
+}
+
+const Operator* JSOperatorBuilder::NotEqual(CompareOperationHints hints) {
+  // TODO(turbofan): Cache most important versions of this operator.
+  return new (zone()) Operator1<CompareOperationHints>(  //--
+      IrOpcode::kJSNotEqual, Operator::kNoProperties,    // opcode
+      "JSNotEqual",                                      // name
+      2, 1, 1, 1, 1, 2,                                  // inputs/outputs
+      hints);                                            // parameter
+}
+
+const Operator* JSOperatorBuilder::StrictEqual(CompareOperationHints hints) {
+  // TODO(turbofan): Cache most important versions of this operator.
+  return new (zone()) Operator1<CompareOperationHints>(  //--
+      IrOpcode::kJSStrictEqual, Operator::kPure,         // opcode
+      "JSStrictEqual",                                   // name
+      2, 0, 0, 1, 0, 0,                                  // inputs/outputs
+      hints);                                            // parameter
+}
+
+const Operator* JSOperatorBuilder::StrictNotEqual(CompareOperationHints hints) {
+  // TODO(turbofan): Cache most important versions of this operator.
+  return new (zone()) Operator1<CompareOperationHints>(  //--
+      IrOpcode::kJSStrictNotEqual, Operator::kPure,      // opcode
+      "JSStrictNotEqual",                                // name
+      2, 0, 0, 1, 0, 0,                                  // inputs/outputs
+      hints);                                            // parameter
+}
+
+const Operator* JSOperatorBuilder::LessThan(CompareOperationHints hints) {
+  // TODO(turbofan): Cache most important versions of this operator.
+  return new (zone()) Operator1<CompareOperationHints>(  //--
+      IrOpcode::kJSLessThan, Operator::kNoProperties,    // opcode
+      "JSLessThan",                                      // name
+      2, 1, 1, 1, 1, 2,                                  // inputs/outputs
+      hints);                                            // parameter
+}
+
+const Operator* JSOperatorBuilder::GreaterThan(CompareOperationHints hints) {
+  // TODO(turbofan): Cache most important versions of this operator.
+  return new (zone()) Operator1<CompareOperationHints>(   //--
+      IrOpcode::kJSGreaterThan, Operator::kNoProperties,  // opcode
+      "JSGreaterThan",                                    // name
+      2, 1, 1, 1, 1, 2,                                   // inputs/outputs
+      hints);                                             // parameter
+}
+
+const Operator* JSOperatorBuilder::LessThanOrEqual(
+    CompareOperationHints hints) {
+  // TODO(turbofan): Cache most important versions of this operator.
+  return new (zone()) Operator1<CompareOperationHints>(       //--
+      IrOpcode::kJSLessThanOrEqual, Operator::kNoProperties,  // opcode
+      "JSLessThanOrEqual",                                    // name
+      2, 1, 1, 1, 1, 2,                                       // inputs/outputs
+      hints);                                                 // parameter
+}
+
+const Operator* JSOperatorBuilder::GreaterThanOrEqual(
+    CompareOperationHints hints) {
+  // TODO(turbofan): Cache most important versions of this operator.
+  return new (zone()) Operator1<CompareOperationHints>(          //--
+      IrOpcode::kJSGreaterThanOrEqual, Operator::kNoProperties,  // opcode
+      "JSGreaterThanOrEqual",                                    // name
+      2, 1, 1, 1, 1, 2,  // inputs/outputs
+      hints);            // parameter
+}
 
 const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
@@ -625,6 +718,21 @@
       access);                                             // parameter
 }
 
+const Operator* JSOperatorBuilder::GeneratorStore(int register_count) {
+  return new (zone()) Operator1<int>(                   // --
+      IrOpcode::kJSGeneratorStore, Operator::kNoThrow,  // opcode
+      "JSGeneratorStore",                               // name
+      3 + register_count, 1, 1, 0, 1, 0,                // counts
+      register_count);                                  // parameter
+}
+
+const Operator* JSOperatorBuilder::GeneratorRestoreRegister(int index) {
+  return new (zone()) Operator1<int>(                             // --
+      IrOpcode::kJSGeneratorRestoreRegister, Operator::kNoThrow,  // opcode
+      "JSGeneratorRestoreRegister",                               // name
+      1, 1, 1, 1, 1, 0,                                           // counts
+      index);                                                     // parameter
+}
 
 const Operator* JSOperatorBuilder::StoreNamed(LanguageMode language_mode,
                                               Handle<Name> name,
diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h
index 750817a..8390cbd 100644
--- a/src/compiler/js-operator.h
+++ b/src/compiler/js-operator.h
@@ -344,7 +344,6 @@
 
 const CreateClosureParameters& CreateClosureParametersOf(const Operator* op);
 
-
 // Defines shared information for the literal that should be created. This is
 // used as parameter by JSCreateLiteralArray, JSCreateLiteralObject and
 // JSCreateLiteralRegExp operators.
@@ -375,6 +374,9 @@
 
 const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op);
 
+const BinaryOperationHints& BinaryOperationHintsOf(const Operator* op);
+
+const CompareOperationHints& CompareOperationHintsOf(const Operator* op);
 
 // Interface for building JavaScript-level operators, e.g. directly from the
 // AST. Most operators have no parameters, thus can be globally shared for all
@@ -383,14 +385,14 @@
  public:
   explicit JSOperatorBuilder(Zone* zone);
 
-  const Operator* Equal();
-  const Operator* NotEqual();
-  const Operator* StrictEqual();
-  const Operator* StrictNotEqual();
-  const Operator* LessThan();
-  const Operator* GreaterThan();
-  const Operator* LessThanOrEqual();
-  const Operator* GreaterThanOrEqual();
+  const Operator* Equal(CompareOperationHints hints);
+  const Operator* NotEqual(CompareOperationHints hints);
+  const Operator* StrictEqual(CompareOperationHints hints);
+  const Operator* StrictNotEqual(CompareOperationHints hints);
+  const Operator* LessThan(CompareOperationHints hints);
+  const Operator* GreaterThan(CompareOperationHints hints);
+  const Operator* LessThanOrEqual(CompareOperationHints hints);
+  const Operator* GreaterThanOrEqual(CompareOperationHints hints);
   const Operator* BitwiseOr(BinaryOperationHints hints);
   const Operator* BitwiseXor(BinaryOperationHints hints);
   const Operator* BitwiseAnd(BinaryOperationHints hints);
@@ -470,6 +472,13 @@
   const Operator* LoadMessage();
   const Operator* StoreMessage();
 
+  // Used to implement Ignition's SuspendGenerator bytecode.
+  const Operator* GeneratorStore(int register_count);
+
+  // Used to implement Ignition's ResumeGenerator bytecode.
+  const Operator* GeneratorRestoreContinuation();
+  const Operator* GeneratorRestoreRegister(int index);
+
   const Operator* StackCheck();
 
   const Operator* CreateFunctionContext(int slot_count);
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
index 8099533..fcfe134 100644
--- a/src/compiler/js-typed-lowering.cc
+++ b/src/compiler/js-typed-lowering.cc
@@ -27,7 +27,42 @@
   JSBinopReduction(JSTypedLowering* lowering, Node* node)
       : lowering_(lowering), node_(node) {}
 
-  void ConvertInputsToNumberOrUndefined(Node* frame_state) {
+  BinaryOperationHints::Hint GetNumberBinaryOperationFeedback() {
+    if (!(lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) ||
+        !(lowering_->flags() & JSTypedLowering::kTypeFeedbackEnabled)) {
+      return BinaryOperationHints::kAny;
+    }
+    DCHECK_NE(0, node_->op()->ControlOutputCount());
+    DCHECK_EQ(1, node_->op()->EffectOutputCount());
+    DCHECK_EQ(2, OperatorProperties::GetFrameStateInputCount(node_->op()));
+    BinaryOperationHints hints = BinaryOperationHintsOf(node_->op());
+    BinaryOperationHints::Hint combined = hints.combined();
+    if (combined == BinaryOperationHints::kSignedSmall ||
+        combined == BinaryOperationHints::kSigned32 ||
+        combined == BinaryOperationHints::kNumberOrUndefined) {
+      return combined;
+    }
+    return BinaryOperationHints::kAny;
+  }
+
+  CompareOperationHints::Hint GetNumberCompareOperationFeedback() {
+    if (!(lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) ||
+        !(lowering_->flags() & JSTypedLowering::kTypeFeedbackEnabled)) {
+      return CompareOperationHints::kAny;
+    }
+    DCHECK_NE(0, node_->op()->ControlOutputCount());
+    DCHECK_EQ(1, node_->op()->EffectOutputCount());
+    DCHECK_EQ(2, OperatorProperties::GetFrameStateInputCount(node_->op()));
+    CompareOperationHints hints = CompareOperationHintsOf(node_->op());
+    CompareOperationHints::Hint combined = hints.combined();
+    if (combined == CompareOperationHints::kSignedSmall ||
+        combined == CompareOperationHints::kNumber) {
+      return combined;
+    }
+    return CompareOperationHints::kAny;
+  }
+
+  void ConvertInputsToNumber(Node* frame_state) {
     // To convert the inputs to numbers, we have to provide frame states
     // for lazy bailouts in the ToNumber conversions.
     // We use a little hack here: we take the frame state before the binary
@@ -46,11 +81,11 @@
       ConvertBothInputsToNumber(&left_input, &right_input, frame_state);
     } else {
       left_input = left_is_primitive
-                       ? ConvertPlainPrimitiveToNumberOrUndefined(left())
+                       ? ConvertPlainPrimitiveToNumber(left())
                        : ConvertSingleInputToNumber(
                              left(), CreateFrameStateForLeftInput(frame_state));
       right_input = right_is_primitive
-                        ? ConvertPlainPrimitiveToNumberOrUndefined(right())
+                        ? ConvertPlainPrimitiveToNumber(right())
                         : ConvertSingleInputToNumber(
                               right(), CreateFrameStateForRightInput(
                                            frame_state, left_input));
@@ -107,6 +142,53 @@
     return lowering_->Changed(node_);
   }
 
+  Reduction ChangeToSpeculativeOperator(const Operator* op, Type* upper_bound) {
+    DCHECK_EQ(1, op->EffectInputCount());
+    DCHECK_EQ(1, op->EffectOutputCount());
+    DCHECK_EQ(false, OperatorProperties::HasContextInput(op));
+    DCHECK_EQ(1, op->ControlInputCount());
+    DCHECK_EQ(0, op->ControlOutputCount());
+    DCHECK_EQ(0, OperatorProperties::GetFrameStateInputCount(op));
+    DCHECK_EQ(2, op->ValueInputCount());
+
+    DCHECK_EQ(1, node_->op()->EffectInputCount());
+    DCHECK_EQ(1, node_->op()->EffectOutputCount());
+    DCHECK_EQ(1, node_->op()->ControlInputCount());
+    DCHECK_LT(1, node_->op()->ControlOutputCount());
+    DCHECK_EQ(2, OperatorProperties::GetFrameStateInputCount(node_->op()));
+    DCHECK_EQ(2, node_->op()->ValueInputCount());
+
+    // Reconnect the control output to bypass the IfSuccess node and
+    // possibly disconnect from the IfException node.
+    for (Edge edge : node_->use_edges()) {
+      Node* const user = edge.from();
+      DCHECK(!user->IsDead());
+      if (NodeProperties::IsControlEdge(edge)) {
+        if (user->opcode() == IrOpcode::kIfSuccess) {
+          user->ReplaceUses(NodeProperties::GetControlInput(node_));
+          user->Kill();
+        } else {
+          DCHECK_EQ(user->opcode(), IrOpcode::kIfException);
+          edge.UpdateTo(jsgraph()->Dead());
+        }
+      }
+    }
+
+    // Remove both bailout frame states and the context.
+    node_->RemoveInput(NodeProperties::FirstFrameStateIndex(node_) + 1);
+    node_->RemoveInput(NodeProperties::FirstFrameStateIndex(node_));
+    node_->RemoveInput(NodeProperties::FirstContextIndex(node_));
+
+    NodeProperties::ChangeOp(node_, op);
+
+    // Update the type to number.
+    Type* node_type = NodeProperties::GetType(node_);
+    NodeProperties::SetType(node_,
+                            Type::Intersect(node_type, upper_bound, zone()));
+
+    return lowering_->Changed(node_);
+  }
+
   Reduction ChangeToPureOperator(const Operator* op, Type* type) {
     return ChangeToPureOperator(op, false, type);
   }
@@ -216,17 +298,15 @@
         frame_state->InputAt(kFrameStateOuterStateInput));
   }
 
-  Node* ConvertPlainPrimitiveToNumberOrUndefined(Node* node) {
+  Node* ConvertPlainPrimitiveToNumber(Node* node) {
     DCHECK(NodeProperties::GetType(node)->Is(Type::PlainPrimitive()));
     // Avoid inserting too many eager ToNumber() operations.
     Reduction const reduction = lowering_->ReduceJSToNumberInput(node);
     if (reduction.Changed()) return reduction.replacement();
-    if (NodeProperties::GetType(node)->Is(Type::NumberOrUndefined())) {
+    if (NodeProperties::GetType(node)->Is(Type::Number())) {
       return node;
     }
-    return graph()->NewNode(
-        javascript()->ToNumber(), node, jsgraph()->NoContextConstant(),
-        jsgraph()->EmptyFrameState(), graph()->start(), graph()->start());
+    return graph()->NewNode(simplified()->PlainPrimitiveToNumber(), node);
   }
 
   Node* ConvertSingleInputToNumber(Node* node, Node* frame_state) {
@@ -339,14 +419,31 @@
   if (flags() & kDisableBinaryOpReduction) return NoChange();
 
   JSBinopReduction r(this, node);
-  if (r.BothInputsAre(Type::NumberOrUndefined())) {
+
+  BinaryOperationHints::Hint feedback = r.GetNumberBinaryOperationFeedback();
+  if (feedback == BinaryOperationHints::kNumberOrUndefined &&
+      r.BothInputsAre(Type::PlainPrimitive()) &&
+      r.NeitherInputCanBe(Type::StringOrReceiver())) {
+    // JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
+    Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+    r.ConvertInputsToNumber(frame_state);
+    return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
+  }
+  if (feedback != BinaryOperationHints::kAny) {
+    // Lower to the optimistic number binop.
+    return r.ChangeToSpeculativeOperator(
+        simplified()->SpeculativeNumberAdd(feedback), Type::Number());
+  }
+  if (r.BothInputsAre(Type::Number())) {
     // JSAdd(x:number, y:number) => NumberAdd(x, y)
-    return ReduceNumberBinop(node, simplified()->NumberAdd());
+    Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+    r.ConvertInputsToNumber(frame_state);
+    return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
   }
   if (r.NeitherInputCanBe(Type::StringOrReceiver())) {
     // JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
     Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-    r.ConvertInputsToNumberOrUndefined(frame_state);
+    r.ConvertInputsToNumber(frame_state);
     return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
   }
   if (r.OneInputIs(Type::String())) {
@@ -376,31 +473,69 @@
 
 Reduction JSTypedLowering::ReduceJSModulus(Node* node) {
   if (flags() & kDisableBinaryOpReduction) return NoChange();
-
   JSBinopReduction r(this, node);
   if (r.BothInputsAre(Type::Number())) {
     // JSModulus(x:number, x:number) => NumberModulus(x, y)
     return r.ChangeToPureOperator(simplified()->NumberModulus(),
                                   Type::Number());
   }
+  BinaryOperationHints::Hint feedback = r.GetNumberBinaryOperationFeedback();
+  if (feedback != BinaryOperationHints::kAny) {
+    return r.ChangeToSpeculativeOperator(
+        simplified()->SpeculativeNumberModulus(feedback), Type::Number());
+  }
   return NoChange();
 }
 
-
-Reduction JSTypedLowering::ReduceNumberBinop(Node* node,
-                                             const Operator* numberOp) {
+Reduction JSTypedLowering::ReduceJSSubtract(Node* node) {
   if (flags() & kDisableBinaryOpReduction) return NoChange();
-
   JSBinopReduction r(this, node);
-  if (numberOp == simplified()->NumberModulus()) {
-    if (r.BothInputsAre(Type::NumberOrUndefined())) {
-      return r.ChangeToPureOperator(numberOp, Type::Number());
-    }
-    return NoChange();
+  BinaryOperationHints::Hint feedback = r.GetNumberBinaryOperationFeedback();
+  if (feedback == BinaryOperationHints::kNumberOrUndefined &&
+      r.BothInputsAre(Type::PlainPrimitive())) {
+    // JSSubtract(x:plain-primitive, y:plain-primitive)
+    //   => NumberSubtract(ToNumber(x), ToNumber(y))
+    Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+    r.ConvertInputsToNumber(frame_state);
+    return r.ChangeToPureOperator(simplified()->NumberSubtract(),
+                                  Type::Number());
+  }
+  if (feedback != BinaryOperationHints::kAny) {
+    // Lower to the optimistic number binop.
+    return r.ChangeToSpeculativeOperator(
+        simplified()->SpeculativeNumberSubtract(feedback), Type::Number());
   }
   Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-  r.ConvertInputsToNumberOrUndefined(frame_state);
-  return r.ChangeToPureOperator(numberOp, Type::Number());
+  r.ConvertInputsToNumber(frame_state);
+  return r.ChangeToPureOperator(simplified()->NumberSubtract(), Type::Number());
+}
+
+Reduction JSTypedLowering::ReduceJSMultiply(Node* node) {
+  if (flags() & kDisableBinaryOpReduction) return NoChange();
+  JSBinopReduction r(this, node);
+
+  BinaryOperationHints::Hint feedback = r.GetNumberBinaryOperationFeedback();
+  if (feedback != BinaryOperationHints::kAny) {
+    return r.ChangeToSpeculativeOperator(
+        simplified()->SpeculativeNumberMultiply(feedback), Type::Number());
+  }
+
+  Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+  r.ConvertInputsToNumber(frame_state);
+  return r.ChangeToPureOperator(simplified()->NumberMultiply(), Type::Number());
+}
+
+Reduction JSTypedLowering::ReduceJSDivide(Node* node) {
+  if (flags() & kDisableBinaryOpReduction) return NoChange();
+  JSBinopReduction r(this, node);
+  BinaryOperationHints::Hint feedback = r.GetNumberBinaryOperationFeedback();
+  if (feedback != BinaryOperationHints::kAny) {
+    return r.ChangeToSpeculativeOperator(
+        simplified()->SpeculativeNumberDivide(feedback), Type::Number());
+  }
+  Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+  r.ConvertInputsToNumber(frame_state);
+  return r.ChangeToPureOperator(simplified()->NumberDivide(), Type::Number());
 }
 
 
@@ -409,7 +544,7 @@
 
   JSBinopReduction r(this, node);
   Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-  r.ConvertInputsToNumberOrUndefined(frame_state);
+  r.ConvertInputsToNumber(frame_state);
   r.ConvertInputsToUI32(kSigned, kSigned);
   return r.ChangeToPureOperator(intOp, Type::Integral32());
 }
@@ -422,7 +557,7 @@
 
   JSBinopReduction r(this, node);
   Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-  r.ConvertInputsToNumberOrUndefined(frame_state);
+  r.ConvertInputsToNumber(frame_state);
   r.ConvertInputsToUI32(left_signedness, kUnsigned);
   return r.ChangeToPureOperator(shift_op);
 }
@@ -456,7 +591,10 @@
     r.ChangeToPureOperator(stringOp);
     return Changed(node);
   }
-  if (r.OneInputCannotBe(Type::StringOrReceiver())) {
+
+  CompareOperationHints::Hint hint = r.GetNumberCompareOperationFeedback();
+  if (hint != CompareOperationHints::kAny ||
+      r.OneInputCannotBe(Type::StringOrReceiver())) {
     const Operator* less_than;
     const Operator* less_than_or_equal;
     if (r.BothInputsAre(Type::Unsigned32())) {
@@ -465,10 +603,13 @@
     } else if (r.BothInputsAre(Type::Signed32())) {
       less_than = machine()->Int32LessThan();
       less_than_or_equal = machine()->Int32LessThanOrEqual();
+    } else if (hint != CompareOperationHints::kAny) {
+      less_than = simplified()->SpeculativeNumberLessThan(hint);
+      less_than_or_equal = simplified()->SpeculativeNumberLessThanOrEqual(hint);
     } else {
       // TODO(turbofan): mixed signed/unsigned int32 comparisons.
       Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-      r.ConvertInputsToNumberOrUndefined(frame_state);
+      r.ConvertInputsToNumber(frame_state);
       less_than = simplified()->NumberLessThan();
       less_than_or_equal = simplified()->NumberLessThanOrEqual();
     }
@@ -491,7 +632,11 @@
       default:
         return NoChange();
     }
-    return r.ChangeToPureOperator(comparison);
+    if (comparison->EffectInputCount() > 0) {
+      return r.ChangeToSpeculativeOperator(comparison, Type::Boolean());
+    } else {
+      return r.ChangeToPureOperator(comparison);
+    }
   }
   // TODO(turbofan): relax/remove effects of this operator in other cases.
   return NoChange();  // Keep a generic comparison.
@@ -592,9 +737,10 @@
       return Replace(replacement);
     }
   }
-  if (r.OneInputCannotBe(Type::NumberOrString())) {
-    // For values with canonical representation (i.e. not string nor number) an
-    // empty type intersection means the values cannot be strictly equal.
+  if (r.OneInputCannotBe(Type::NumberOrSimdOrString())) {
+    // For values with canonical representation (i.e. neither String, nor
+    // Simd128Value nor Number) an empty type intersection means the values
+    // cannot be strictly equal.
     if (!r.left_type()->Maybe(r.right_type())) {
       Node* replacement = jsgraph()->BooleanConstant(invert);
       ReplaceWithValue(node, replacement);
@@ -636,7 +782,7 @@
   if (r.BothInputsAre(Type::String())) {
     return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
   }
-  if (r.BothInputsAre(Type::NumberOrUndefined())) {
+  if (r.BothInputsAre(Type::Number())) {
     return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
   }
   // TODO(turbofan): js-typed-lowering of StrictEqual(mixed types)
@@ -719,21 +865,6 @@
 }
 
 Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
-  // Check for ToNumber truncation of signaling NaN to undefined mapping.
-  if (input->opcode() == IrOpcode::kSelect) {
-    Node* check = NodeProperties::GetValueInput(input, 0);
-    Node* vtrue = NodeProperties::GetValueInput(input, 1);
-    Type* vtrue_type = NodeProperties::GetType(vtrue);
-    Node* vfalse = NodeProperties::GetValueInput(input, 2);
-    Type* vfalse_type = NodeProperties::GetType(vfalse);
-    if (vtrue_type->Is(Type::Undefined()) && vfalse_type->Is(Type::Number())) {
-      if (check->opcode() == IrOpcode::kNumberIsHoleNaN &&
-          check->InputAt(0) == vfalse) {
-        // JSToNumber(Select(NumberIsHoleNaN(x), y:undefined, x:number)) => x
-        return Replace(vfalse);
-      }
-    }
-  }
   // Try constant-folding of JSToNumber with constant inputs.
   Type* input_type = NodeProperties::GetType(input);
   if (input_type->IsConstant()) {
@@ -780,21 +911,10 @@
   }
   Type* const input_type = NodeProperties::GetType(input);
   if (input_type->Is(Type::PlainPrimitive())) {
-    if (NodeProperties::GetContextInput(node) !=
-            jsgraph()->NoContextConstant() ||
-        NodeProperties::GetEffectInput(node) != graph()->start() ||
-        NodeProperties::GetControlInput(node) != graph()->start()) {
-      // JSToNumber(x:plain-primitive,context,effect,control)
-      //   => JSToNumber(x,no-context,start,start)
-      RelaxEffectsAndControls(node);
-      NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
-      NodeProperties::ReplaceControlInput(node, graph()->start());
-      NodeProperties::ReplaceEffectInput(node, graph()->start());
-      DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
-      NodeProperties::ReplaceFrameStateInput(node, 0,
-                                             jsgraph()->EmptyFrameState());
-      return Changed(node);
-    }
+    RelaxEffectsAndControls(node);
+    node->TrimInputCount(1);
+    NodeProperties::ChangeOp(node, simplified()->PlainPrimitiveToNumber());
+    return Changed(node);
   }
   return NoChange();
 }
@@ -1013,13 +1133,13 @@
         Node* effect = NodeProperties::GetEffectInput(node);
         Node* control = NodeProperties::GetControlInput(node);
         // Convert to a number first.
-        if (!value_type->Is(Type::NumberOrUndefined())) {
+        if (!value_type->Is(Type::Number())) {
           Reduction number_reduction = ReduceJSToNumberInput(value);
           if (number_reduction.Changed()) {
             value = number_reduction.replacement();
           } else {
             Node* frame_state_for_to_number =
-                NodeProperties::GetFrameStateInput(node, 1);
+                NodeProperties::FindFrameStateBefore(node);
             value = effect =
                 graph()->NewNode(javascript()->ToNumber(), value, context,
                                  frame_state_for_to_number, effect, control);
@@ -1103,17 +1223,13 @@
   Node* prototype =
       jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
 
-  Node* if_is_smi = nullptr;
-  Node* e_is_smi = nullptr;
   // If the left hand side is an object, no smi check is needed.
-  if (r.left_type()->Maybe(Type::TaggedSigned())) {
-    Node* is_smi = graph()->NewNode(simplified()->ObjectIsSmi(), r.left());
-    Node* branch_is_smi =
-        graph()->NewNode(common()->Branch(BranchHint::kFalse), is_smi, control);
-    if_is_smi = graph()->NewNode(common()->IfTrue(), branch_is_smi);
-    e_is_smi = effect;
-    control = graph()->NewNode(common()->IfFalse(), branch_is_smi);
-  }
+  Node* is_smi = graph()->NewNode(simplified()->ObjectIsSmi(), r.left());
+  Node* branch_is_smi =
+      graph()->NewNode(common()->Branch(BranchHint::kFalse), is_smi, control);
+  Node* if_is_smi = graph()->NewNode(common()->IfTrue(), branch_is_smi);
+  Node* e_is_smi = effect;
+  control = graph()->NewNode(common()->IfFalse(), branch_is_smi);
 
   Node* object_map = effect =
       graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
@@ -1179,6 +1295,17 @@
       simplified()->LoadField(AccessBuilder::ForMapPrototype()),
       loop_object_map, loop_effect, control);
 
+  // If not, check if object prototype is the null prototype.
+  Node* null_proto =
+      graph()->NewNode(simplified()->ReferenceEqual(r.right_type()),
+                       object_prototype, jsgraph()->NullConstant());
+  Node* branch_null_proto = graph()->NewNode(
+      common()->Branch(BranchHint::kFalse), null_proto, control);
+  Node* if_null_proto = graph()->NewNode(common()->IfTrue(), branch_null_proto);
+  Node* e_null_proto = effect;
+
+  control = graph()->NewNode(common()->IfFalse(), branch_null_proto);
+
   // Check if object prototype is equal to function prototype.
   Node* eq_proto =
       graph()->NewNode(simplified()->ReferenceEqual(r.right_type()),
@@ -1190,16 +1317,6 @@
 
   control = graph()->NewNode(common()->IfFalse(), branch_eq_proto);
 
-  // If not, check if object prototype is the null prototype.
-  Node* null_proto =
-      graph()->NewNode(simplified()->ReferenceEqual(r.right_type()),
-                       object_prototype, jsgraph()->NullConstant());
-  Node* branch_null_proto = graph()->NewNode(
-      common()->Branch(BranchHint::kFalse), null_proto, control);
-  Node* if_null_proto = graph()->NewNode(common()->IfTrue(), branch_null_proto);
-  Node* e_null_proto = effect;
-
-  control = graph()->NewNode(common()->IfFalse(), branch_null_proto);
   Node* load_object_map = effect =
       graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
                        object_prototype, effect, control);
@@ -1219,14 +1336,12 @@
       bool_result_runtime_has_in_proto_chain_case, jsgraph()->TrueConstant(),
       jsgraph()->FalseConstant(), control);
 
-  if (if_is_smi != nullptr) {
-    DCHECK_NOT_NULL(e_is_smi);
-    control = graph()->NewNode(common()->Merge(2), if_is_smi, control);
-    effect =
-        graph()->NewNode(common()->EffectPhi(2), e_is_smi, effect, control);
-    result = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                              jsgraph()->FalseConstant(), result, control);
-  }
+  DCHECK_NOT_NULL(e_is_smi);
+  control = graph()->NewNode(common()->Merge(2), if_is_smi, control);
+  effect =
+      graph()->NewNode(common()->EffectPhi(2), e_is_smi, effect, control);
+  result = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                            jsgraph()->FalseConstant(), result, control);
 
   ReplaceWithValue(node, result, effect, control);
   return Changed(result);
@@ -1387,9 +1502,6 @@
         Handle<JSFunction>::cast(target_type->AsConstant()->Value());
     Handle<SharedFunctionInfo> shared(function->shared(), isolate());
 
-    // Remove the eager bailout frame state.
-    NodeProperties::RemoveFrameStateInput(node, 1);
-
     // Patch {node} to an indirect call via the {function}s construct stub.
     Callable callable(handle(shared->construct_stub(), isolate()),
                       ConstructStubDescriptor(isolate()));
@@ -1409,9 +1521,6 @@
 
   // Check if {target} is a JSFunction.
   if (target_type->Is(Type::Function())) {
-    // Remove the eager bailout frame state.
-    NodeProperties::RemoveFrameStateInput(node, 1);
-
     // Patch {node} to an indirect call via the ConstructFunction builtin.
     Callable callable = CodeFactory::ConstructFunction(isolate());
     node->RemoveInput(arity + 1);
@@ -1440,9 +1549,9 @@
   Type* target_type = NodeProperties::GetType(target);
   Node* receiver = NodeProperties::GetValueInput(node, 1);
   Type* receiver_type = NodeProperties::GetType(receiver);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
+  Node* frame_state = NodeProperties::FindFrameStateBefore(node);
 
   // Try to infer receiver {convert_mode} from {receiver} type.
   if (receiver_type->Is(Type::NullOrUndefined())) {
@@ -1480,9 +1589,6 @@
     // Update the effect dependency for the {node}.
     NodeProperties::ReplaceEffectInput(node, effect);
 
-    // Remove the eager bailout frame state.
-    NodeProperties::RemoveFrameStateInput(node, 1);
-
     // Compute flags for the call.
     CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
     if (p.tail_call_mode() == TailCallMode::kAllow) {
@@ -1520,9 +1626,6 @@
 
   // Check if {target} is a JSFunction.
   if (target_type->Is(Type::Function())) {
-    // Remove the eager bailout frame state.
-    NodeProperties::RemoveFrameStateInput(node, 1);
-
     // Compute flags for the call.
     CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
     if (p.tail_call_mode() == TailCallMode::kAllow) {
@@ -1630,6 +1733,84 @@
   return Changed(node);
 }
 
+Reduction JSTypedLowering::ReduceJSGeneratorStore(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSGeneratorStore, node->opcode());
+  Node* generator = NodeProperties::GetValueInput(node, 0);
+  Node* continuation = NodeProperties::GetValueInput(node, 1);
+  Node* offset = NodeProperties::GetValueInput(node, 2);
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  int register_count = OpParameter<int>(node);
+
+  FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectOperandStack();
+  FieldAccess context_field = AccessBuilder::ForJSGeneratorObjectContext();
+  FieldAccess continuation_field =
+      AccessBuilder::ForJSGeneratorObjectContinuation();
+  FieldAccess input_or_debug_pos_field =
+      AccessBuilder::ForJSGeneratorObjectInputOrDebugPos();
+
+  Node* array = effect = graph()->NewNode(simplified()->LoadField(array_field),
+                                          generator, effect, control);
+
+  for (int i = 0; i < register_count; ++i) {
+    Node* value = NodeProperties::GetValueInput(node, 3 + i);
+    effect = graph()->NewNode(
+        simplified()->StoreField(AccessBuilder::ForFixedArraySlot(i)), array,
+        value, effect, control);
+  }
+
+  effect = graph()->NewNode(simplified()->StoreField(context_field), generator,
+                            context, effect, control);
+  effect = graph()->NewNode(simplified()->StoreField(continuation_field),
+                            generator, continuation, effect, control);
+  effect = graph()->NewNode(simplified()->StoreField(input_or_debug_pos_field),
+                            generator, offset, effect, control);
+
+  ReplaceWithValue(node, effect, effect, control);
+  return Changed(effect);
+}
+
+Reduction JSTypedLowering::ReduceJSGeneratorRestoreContinuation(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSGeneratorRestoreContinuation, node->opcode());
+  Node* generator = NodeProperties::GetValueInput(node, 0);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  FieldAccess continuation_field =
+      AccessBuilder::ForJSGeneratorObjectContinuation();
+
+  Node* continuation = effect = graph()->NewNode(
+      simplified()->LoadField(continuation_field), generator, effect, control);
+  Node* executing = jsgraph()->Constant(JSGeneratorObject::kGeneratorExecuting);
+  effect = graph()->NewNode(simplified()->StoreField(continuation_field),
+                            generator, executing, effect, control);
+
+  ReplaceWithValue(node, continuation, effect, control);
+  return Changed(continuation);
+}
+
+Reduction JSTypedLowering::ReduceJSGeneratorRestoreRegister(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSGeneratorRestoreRegister, node->opcode());
+  Node* generator = NodeProperties::GetValueInput(node, 0);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  int index = OpParameter<int>(node);
+
+  FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectOperandStack();
+  FieldAccess element_field = AccessBuilder::ForFixedArraySlot(index);
+
+  Node* array = effect = graph()->NewNode(simplified()->LoadField(array_field),
+                                          generator, effect, control);
+  Node* element = effect = graph()->NewNode(
+      simplified()->LoadField(element_field), array, effect, control);
+  Node* stale = jsgraph()->StaleRegisterConstant();
+  effect = graph()->NewNode(simplified()->StoreField(element_field), array,
+                            stale, effect, control);
+
+  ReplaceWithValue(node, element, effect, control);
+  return Changed(element);
+}
 
 Reduction JSTypedLowering::ReduceSelect(Node* node) {
   DCHECK_EQ(IrOpcode::kSelect, node->opcode());
@@ -1666,31 +1847,38 @@
   // result value and can simply replace the node if it's eliminable.
   if (!NodeProperties::IsConstant(node) && NodeProperties::IsTyped(node) &&
       node->op()->HasProperty(Operator::kEliminatable)) {
+    // We can only constant-fold nodes here, that are known to not cause any
+    // side-effect, may it be a JavaScript observable side-effect or a possible
+    // eager deoptimization exit (i.e. {node} has an operator that doesn't have
+    // the Operator::kNoDeopt property).
     Type* upper = NodeProperties::GetType(node);
-    if (upper->IsConstant()) {
-      Node* replacement = jsgraph()->Constant(upper->AsConstant()->Value());
-      ReplaceWithValue(node, replacement);
-      return Changed(replacement);
-    } else if (upper->Is(Type::MinusZero())) {
-      Node* replacement = jsgraph()->Constant(factory()->minus_zero_value());
-      ReplaceWithValue(node, replacement);
-      return Changed(replacement);
-    } else if (upper->Is(Type::NaN())) {
-      Node* replacement = jsgraph()->NaNConstant();
-      ReplaceWithValue(node, replacement);
-      return Changed(replacement);
-    } else if (upper->Is(Type::Null())) {
-      Node* replacement = jsgraph()->NullConstant();
-      ReplaceWithValue(node, replacement);
-      return Changed(replacement);
-    } else if (upper->Is(Type::PlainNumber()) && upper->Min() == upper->Max()) {
-      Node* replacement = jsgraph()->Constant(upper->Min());
-      ReplaceWithValue(node, replacement);
-      return Changed(replacement);
-    } else if (upper->Is(Type::Undefined())) {
-      Node* replacement = jsgraph()->UndefinedConstant();
-      ReplaceWithValue(node, replacement);
-      return Changed(replacement);
+    if (upper->IsInhabited()) {
+      if (upper->IsConstant()) {
+        Node* replacement = jsgraph()->Constant(upper->AsConstant()->Value());
+        ReplaceWithValue(node, replacement);
+        return Changed(replacement);
+      } else if (upper->Is(Type::MinusZero())) {
+        Node* replacement = jsgraph()->Constant(factory()->minus_zero_value());
+        ReplaceWithValue(node, replacement);
+        return Changed(replacement);
+      } else if (upper->Is(Type::NaN())) {
+        Node* replacement = jsgraph()->NaNConstant();
+        ReplaceWithValue(node, replacement);
+        return Changed(replacement);
+      } else if (upper->Is(Type::Null())) {
+        Node* replacement = jsgraph()->NullConstant();
+        ReplaceWithValue(node, replacement);
+        return Changed(replacement);
+      } else if (upper->Is(Type::PlainNumber()) &&
+                 upper->Min() == upper->Max()) {
+        Node* replacement = jsgraph()->Constant(upper->Min());
+        ReplaceWithValue(node, replacement);
+        return Changed(replacement);
+      } else if (upper->Is(Type::Undefined())) {
+        Node* replacement = jsgraph()->UndefinedConstant();
+        ReplaceWithValue(node, replacement);
+        return Changed(replacement);
+      }
     }
   }
   switch (node->opcode()) {
@@ -1723,11 +1911,11 @@
     case IrOpcode::kJSAdd:
       return ReduceJSAdd(node);
     case IrOpcode::kJSSubtract:
-      return ReduceNumberBinop(node, simplified()->NumberSubtract());
+      return ReduceJSSubtract(node);
     case IrOpcode::kJSMultiply:
-      return ReduceNumberBinop(node, simplified()->NumberMultiply());
+      return ReduceJSMultiply(node);
     case IrOpcode::kJSDivide:
-      return ReduceNumberBinop(node, simplified()->NumberDivide());
+      return ReduceJSDivide(node);
     case IrOpcode::kJSModulus:
       return ReduceJSModulus(node);
     case IrOpcode::kJSToBoolean:
@@ -1766,6 +1954,12 @@
       return ReduceJSForInNext(node);
     case IrOpcode::kJSForInStep:
       return ReduceJSForInStep(node);
+    case IrOpcode::kJSGeneratorStore:
+      return ReduceJSGeneratorStore(node);
+    case IrOpcode::kJSGeneratorRestoreContinuation:
+      return ReduceJSGeneratorRestoreContinuation(node);
+    case IrOpcode::kJSGeneratorRestoreRegister:
+      return ReduceJSGeneratorRestoreRegister(node);
     case IrOpcode::kSelect:
       return ReduceSelect(node);
     default:
@@ -1781,6 +1975,14 @@
                           jsgraph()->Int32Constant(rhs));
 }
 
+Node* JSTypedLowering::EmptyFrameState() {
+  return graph()->NewNode(
+      common()->FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
+                           nullptr),
+      jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
+      jsgraph()->EmptyStateValues(), jsgraph()->NoContextConstant(),
+      jsgraph()->UndefinedConstant(), graph()->start());
+}
 
 Factory* JSTypedLowering::factory() const { return jsgraph()->factory(); }
 
diff --git a/src/compiler/js-typed-lowering.h b/src/compiler/js-typed-lowering.h
index 8733e6c..a370b7a 100644
--- a/src/compiler/js-typed-lowering.h
+++ b/src/compiler/js-typed-lowering.h
@@ -36,6 +36,7 @@
     kNoFlags = 0u,
     kDeoptimizationEnabled = 1u << 0,
     kDisableBinaryOpReduction = 1u << 1,
+    kTypeFeedbackEnabled = 1u << 2,
   };
   typedef base::Flags<Flag> Flags;
 
@@ -76,14 +77,20 @@
   Reduction ReduceJSForInDone(Node* node);
   Reduction ReduceJSForInNext(Node* node);
   Reduction ReduceJSForInStep(Node* node);
+  Reduction ReduceJSGeneratorStore(Node* node);
+  Reduction ReduceJSGeneratorRestoreContinuation(Node* node);
+  Reduction ReduceJSGeneratorRestoreRegister(Node* node);
   Reduction ReduceSelect(Node* node);
-  Reduction ReduceNumberBinop(Node* node, const Operator* numberOp);
+  Reduction ReduceJSSubtract(Node* node);
+  Reduction ReduceJSDivide(Node* node);
   Reduction ReduceInt32Binop(Node* node, const Operator* intOp);
   Reduction ReduceUI32Shift(Node* node, Signedness left_signedness,
                             const Operator* shift_op);
 
   Node* Word32Shl(Node* const lhs, int32_t const rhs);
 
+  Node* EmptyFrameState();
+
   Factory* factory() const;
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index 5e217b0..c3b68d6 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -131,7 +131,7 @@
 
 
 // static
-int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
+bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
   // Most runtime functions need a FrameState. A few chosen ones that we know
   // not to call into arbitrary JavaScript, not to throw, and not to deoptimize
   // are blacklisted here and can be called without a FrameState.
@@ -139,16 +139,11 @@
     case Runtime::kAbort:
     case Runtime::kAllocateInTargetSpace:
     case Runtime::kCreateIterResultObject:
-    case Runtime::kDefineDataPropertyInLiteral:
     case Runtime::kDefineGetterPropertyUnchecked:  // TODO(jarin): Is it safe?
     case Runtime::kDefineSetterPropertyUnchecked:  // TODO(jarin): Is it safe?
     case Runtime::kForInDone:
     case Runtime::kForInStep:
-    case Runtime::kGeneratorSetContext:
     case Runtime::kGeneratorGetContinuation:
-    case Runtime::kGeneratorSetContinuation:
-    case Runtime::kGeneratorLoadRegister:
-    case Runtime::kGeneratorStoreRegister:
     case Runtime::kGetSuperConstructor:
     case Runtime::kIsFunction:
     case Runtime::kNewClosure:
@@ -167,12 +162,15 @@
     case Runtime::kToFastProperties:  // TODO(conradw): Is it safe?
     case Runtime::kTraceEnter:
     case Runtime::kTraceExit:
-      return 0;
+      return false;
+    case Runtime::kInlineCall:
+    case Runtime::kInlineDeoptimizeNow:
     case Runtime::kInlineGetPrototype:
     case Runtime::kInlineNewObject:
     case Runtime::kInlineRegExpConstructResult:
     case Runtime::kInlineRegExpExec:
     case Runtime::kInlineSubString:
+    case Runtime::kInlineThrowNotDateError:
     case Runtime::kInlineToInteger:
     case Runtime::kInlineToLength:
     case Runtime::kInlineToName:
@@ -182,11 +180,7 @@
     case Runtime::kInlineToPrimitive_Number:
     case Runtime::kInlineToPrimitive_String:
     case Runtime::kInlineToString:
-      return 1;
-    case Runtime::kInlineCall:
-    case Runtime::kInlineDeoptimizeNow:
-    case Runtime::kInlineThrowNotDateError:
-      return 2;
+      return true;
     default:
       break;
   }
@@ -194,9 +188,9 @@
   // Most inlined runtime functions (except the ones listed above) can be called
   // without a FrameState or will be lowered by JSIntrinsicLowering internally.
   const Runtime::Function* const f = Runtime::FunctionForId(function);
-  if (f->intrinsic_type == Runtime::IntrinsicType::INLINE) return 0;
+  if (f->intrinsic_type == Runtime::IntrinsicType::INLINE) return false;
 
-  return 1;
+  return true;
 }
 
 
@@ -259,7 +253,7 @@
   locations.AddParam(regloc(kContextRegister));
   types.AddParam(MachineType::AnyTagged());
 
-  if (Linkage::FrameStateInputCount(function_id) == 0) {
+  if (!Linkage::NeedsFrameStateInput(function_id)) {
     flags = static_cast<CallDescriptor::Flags>(
         flags & ~CallDescriptor::kNeedsFrameState);
   }
diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h
index 958e8dc..8596327 100644
--- a/src/compiler/linkage.h
+++ b/src/compiler/linkage.h
@@ -368,7 +368,7 @@
   bool ParameterHasSecondaryLocation(int index) const;
   LinkageLocation GetParameterSecondaryLocation(int index) const;
 
-  static int FrameStateInputCount(Runtime::FunctionId function);
+  static bool NeedsFrameStateInput(Runtime::FunctionId function);
 
   // Get the location where an incoming OSR value is stored.
   LinkageLocation GetOsrValueLocation(int index) const;
diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc
index 4b50ffe..b566f48 100644
--- a/src/compiler/machine-operator-reducer.cc
+++ b/src/compiler/machine-operator-reducer.cc
@@ -6,6 +6,7 @@
 
 #include "src/base/bits.h"
 #include "src/base/division-by-constant.h"
+#include "src/base/ieee754.h"
 #include "src/codegen.h"
 #include "src/compiler/diamond.h"
 #include "src/compiler/graph.h"
@@ -152,14 +153,8 @@
     }
     case IrOpcode::kWord32Shl:
       return ReduceWord32Shl(node);
-    case IrOpcode::kWord32Shr: {
-      Uint32BinopMatcher m(node);
-      if (m.right().Is(0)) return Replace(m.left().node());  // x >>> 0 => x
-      if (m.IsFoldable()) {                                  // K >>> K => K
-        return ReplaceInt32(m.left().Value() >> m.right().Value());
-      }
-      return ReduceWord32Shifts(node);
-    }
+    case IrOpcode::kWord32Shr:
+      return ReduceWord32Shr(node);
     case IrOpcode::kWord32Sar:
       return ReduceWord32Sar(node);
     case IrOpcode::kWord32Ror: {
@@ -239,18 +234,6 @@
       if (m.IsFoldable()) {  // K < K => K
         return ReplaceBool(m.left().Value() < m.right().Value());
       }
-      if (m.left().IsInt32Sub() && m.right().Is(0)) {  // x - y < 0 => x < y
-        Int32BinopMatcher msub(m.left().node());
-        node->ReplaceInput(0, msub.left().node());
-        node->ReplaceInput(1, msub.right().node());
-        return Changed(node);
-      }
-      if (m.left().Is(0) && m.right().IsInt32Sub()) {  // 0 < x - y => y < x
-        Int32BinopMatcher msub(m.right().node());
-        node->ReplaceInput(0, msub.right().node());
-        node->ReplaceInput(1, msub.left().node());
-        return Changed(node);
-      }
       if (m.LeftEqualsRight()) return ReplaceBool(false);  // x < x => false
       break;
     }
@@ -259,18 +242,6 @@
       if (m.IsFoldable()) {  // K <= K => K
         return ReplaceBool(m.left().Value() <= m.right().Value());
       }
-      if (m.left().IsInt32Sub() && m.right().Is(0)) {  // x - y <= 0 => x <= y
-        Int32BinopMatcher msub(m.left().node());
-        node->ReplaceInput(0, msub.left().node());
-        node->ReplaceInput(1, msub.right().node());
-        return Changed(node);
-      }
-      if (m.left().Is(0) && m.right().IsInt32Sub()) {  // 0 <= x - y => y <= x
-        Int32BinopMatcher msub(m.right().node());
-        node->ReplaceInput(0, msub.right().node());
-        node->ReplaceInput(1, msub.left().node());
-        return Changed(node);
-      }
       if (m.LeftEqualsRight()) return ReplaceBool(true);  // x <= x => true
       break;
     }
@@ -382,6 +353,80 @@
       }
       break;
     }
+    case IrOpcode::kFloat64Atan: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::atan(m.Value()));
+      break;
+    }
+    case IrOpcode::kFloat64Atan2: {
+      Float64BinopMatcher m(node);
+      if (m.right().IsNaN()) {
+        return Replace(m.right().node());
+      }
+      if (m.left().IsNaN()) {
+        return Replace(m.left().node());
+      }
+      if (m.IsFoldable()) {
+        return ReplaceFloat64(
+            base::ieee754::atan2(m.left().Value(), m.right().Value()));
+      }
+      break;
+    }
+    case IrOpcode::kFloat64Atanh: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::atanh(m.Value()));
+      break;
+    }
+    case IrOpcode::kFloat64Cos: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::cos(m.Value()));
+      break;
+    }
+    case IrOpcode::kFloat64Exp: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::exp(m.Value()));
+      break;
+    }
+    case IrOpcode::kFloat64Expm1: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::expm1(m.Value()));
+      break;
+    }
+    case IrOpcode::kFloat64Log: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::log(m.Value()));
+      break;
+    }
+    case IrOpcode::kFloat64Log1p: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::log1p(m.Value()));
+      break;
+    }
+    case IrOpcode::kFloat64Log2: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::log2(m.Value()));
+      break;
+    }
+    case IrOpcode::kFloat64Log10: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::log10(m.Value()));
+      break;
+    }
+    case IrOpcode::kFloat64Cbrt: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::cbrt(m.Value()));
+      break;
+    }
+    case IrOpcode::kFloat64Sin: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::sin(m.Value()));
+      break;
+    }
+    case IrOpcode::kFloat64Tan: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::tan(m.Value()));
+      break;
+    }
     case IrOpcode::kChangeFloat32ToFloat64: {
       Float32Matcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceFloat64(m.Value());
@@ -785,6 +830,25 @@
   return ReduceWord32Shifts(node);
 }
 
+Reduction MachineOperatorReducer::ReduceWord32Shr(Node* node) {
+  Uint32BinopMatcher m(node);
+  if (m.right().Is(0)) return Replace(m.left().node());  // x >>> 0 => x
+  if (m.IsFoldable()) {                                  // K >>> K => K
+    return ReplaceInt32(m.left().Value() >> m.right().Value());
+  }
+  if (m.left().IsWord32And() && m.right().HasValue()) {
+    Uint32BinopMatcher mleft(m.left().node());
+    if (mleft.right().HasValue()) {
+      uint32_t shift = m.right().Value() & 0x1f;
+      uint32_t mask = mleft.right().Value();
+      if ((mask >> shift) == 0) {
+        // (m >>> s) == 0 implies ((x & m) >>> s) == 0
+        return ReplaceInt32(0);
+      }
+    }
+  }
+  return ReduceWord32Shifts(node);
+}
 
 Reduction MachineOperatorReducer::ReduceWord32Sar(Node* node) {
   Int32BinopMatcher m(node);
diff --git a/src/compiler/machine-operator-reducer.h b/src/compiler/machine-operator-reducer.h
index cddf13d..e44521e 100644
--- a/src/compiler/machine-operator-reducer.h
+++ b/src/compiler/machine-operator-reducer.h
@@ -74,6 +74,7 @@
   Reduction ReduceProjection(size_t index, Node* node);
   Reduction ReduceWord32Shifts(Node* node);
   Reduction ReduceWord32Shl(Node* node);
+  Reduction ReduceWord32Shr(Node* node);
   Reduction ReduceWord32Sar(Node* node);
   Reduction ReduceWord32And(Node* node);
   Reduction ReduceWord32Or(Node* node);
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index 0d229c7..3662d0a 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -88,10 +88,7 @@
   V(Word64Clz, Operator::kNoProperties, 1, 0, 1)                              \
   V(Word64Equal, Operator::kCommutative, 2, 0, 1)                             \
   V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)       \
-  V(Int32AddWithOverflow, Operator::kAssociative | Operator::kCommutative, 2, \
-    0, 2)                                                                     \
   V(Int32Sub, Operator::kNoProperties, 2, 0, 1)                               \
-  V(Int32SubWithOverflow, Operator::kNoProperties, 2, 0, 2)                   \
   V(Int32Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)       \
   V(Int32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)   \
   V(Int32Div, Operator::kNoProperties, 2, 1, 1)                               \
@@ -104,10 +101,7 @@
   V(Uint32Mod, Operator::kNoProperties, 2, 1, 1)                              \
   V(Uint32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
   V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)       \
-  V(Int64AddWithOverflow, Operator::kAssociative | Operator::kCommutative, 2, \
-    0, 2)                                                                     \
   V(Int64Sub, Operator::kNoProperties, 2, 0, 1)                               \
-  V(Int64SubWithOverflow, Operator::kNoProperties, 2, 0, 2)                   \
   V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)       \
   V(Int64Div, Operator::kNoProperties, 2, 1, 1)                               \
   V(Int64Mod, Operator::kNoProperties, 2, 1, 1)                               \
@@ -130,6 +124,7 @@
   V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2)             \
   V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2)             \
   V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1)                   \
+  V(Float64SilenceNaN, Operator::kNoProperties, 1, 0, 1)                      \
   V(RoundFloat64ToInt32, Operator::kNoProperties, 1, 0, 1)                    \
   V(RoundInt32ToFloat32, Operator::kNoProperties, 1, 0, 1)                    \
   V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1)                    \
@@ -154,13 +149,26 @@
   V(Float32Div, Operator::kNoProperties, 2, 0, 1)                             \
   V(Float32Sqrt, Operator::kNoProperties, 1, 0, 1)                            \
   V(Float64Abs, Operator::kNoProperties, 1, 0, 1)                             \
+  V(Float64Atan, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Float64Atan2, Operator::kNoProperties, 2, 0, 1)                           \
+  V(Float64Atanh, Operator::kNoProperties, 1, 0, 1)                           \
+  V(Float64Cbrt, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Float64Cos, Operator::kNoProperties, 1, 0, 1)                             \
+  V(Float64Exp, Operator::kNoProperties, 1, 0, 1)                             \
+  V(Float64Expm1, Operator::kNoProperties, 1, 0, 1)                           \
+  V(Float64Log, Operator::kNoProperties, 1, 0, 1)                             \
+  V(Float64Log1p, Operator::kNoProperties, 1, 0, 1)                           \
+  V(Float64Log2, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Float64Log10, Operator::kNoProperties, 1, 0, 1)                           \
   V(Float64Add, Operator::kCommutative, 2, 0, 1)                              \
   V(Float64Sub, Operator::kNoProperties, 2, 0, 1)                             \
   V(Float64SubPreserveNan, Operator::kNoProperties, 2, 0, 1)                  \
   V(Float64Mul, Operator::kCommutative, 2, 0, 1)                              \
   V(Float64Div, Operator::kNoProperties, 2, 0, 1)                             \
   V(Float64Mod, Operator::kNoProperties, 2, 0, 1)                             \
+  V(Float64Sin, Operator::kNoProperties, 1, 0, 1)                             \
   V(Float64Sqrt, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Float64Tan, Operator::kNoProperties, 1, 0, 1)                             \
   V(Float32Equal, Operator::kCommutative, 2, 0, 1)                            \
   V(Float32LessThan, Operator::kNoProperties, 2, 0, 1)                        \
   V(Float32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                 \
@@ -372,7 +380,15 @@
   V(Float64RoundTruncate, Operator::kNoProperties, 1, 0, 1) \
   V(Float64RoundTiesAway, Operator::kNoProperties, 1, 0, 1) \
   V(Float32RoundTiesEven, Operator::kNoProperties, 1, 0, 1) \
-  V(Float64RoundTiesEven, Operator::kNoProperties, 1, 0, 1)
+  V(Float64RoundTiesEven, Operator::kNoProperties, 1, 0, 1) \
+  V(Float32Neg, Operator::kNoProperties, 1, 0, 1)           \
+  V(Float64Neg, Operator::kNoProperties, 1, 0, 1)
+
+#define OVERFLOW_OP_LIST(V)                                                \
+  V(Int32AddWithOverflow, Operator::kAssociative | Operator::kCommutative) \
+  V(Int32SubWithOverflow, Operator::kNoProperties)                         \
+  V(Int64AddWithOverflow, Operator::kAssociative | Operator::kCommutative) \
+  V(Int64SubWithOverflow, Operator::kNoProperties)
 
 #define MACHINE_TYPE_LIST(V) \
   V(Float32)                 \
@@ -426,33 +442,47 @@
   PURE_OPTIONAL_OP_LIST(PURE)
 #undef PURE
 
-#define LOAD(Type)                                                             \
-  struct Load##Type##Operator final : public Operator1<LoadRepresentation> {   \
-    Load##Type##Operator()                                                     \
-        : Operator1<LoadRepresentation>(                                       \
-              IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite,        \
-              "Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {}                \
-  };                                                                           \
-  struct CheckedLoad##Type##Operator final                                     \
-      : public Operator1<CheckedLoadRepresentation> {                          \
-    CheckedLoad##Type##Operator()                                              \
-        : Operator1<CheckedLoadRepresentation>(                                \
-              IrOpcode::kCheckedLoad, Operator::kNoThrow | Operator::kNoWrite, \
-              "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {}         \
-  };                                                                           \
-  Load##Type##Operator kLoad##Type;                                            \
+#define OVERFLOW_OP(Name, properties)                                        \
+  struct Name##Operator final : public Operator {                            \
+    Name##Operator()                                                         \
+        : Operator(IrOpcode::k##Name,                                        \
+                   Operator::kEliminatable | Operator::kNoRead | properties, \
+                   #Name, 2, 0, 1, 2, 0, 0) {}                               \
+  };                                                                         \
+  Name##Operator k##Name;
+  OVERFLOW_OP_LIST(OVERFLOW_OP)
+#undef OVERFLOW_OP
+
+#define LOAD(Type)                                                           \
+  struct Load##Type##Operator final : public Operator1<LoadRepresentation> { \
+    Load##Type##Operator()                                                   \
+        : Operator1<LoadRepresentation>(                                     \
+              IrOpcode::kLoad,                                               \
+              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,  \
+              "Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {}              \
+  };                                                                         \
+  struct CheckedLoad##Type##Operator final                                   \
+      : public Operator1<CheckedLoadRepresentation> {                        \
+    CheckedLoad##Type##Operator()                                            \
+        : Operator1<CheckedLoadRepresentation>(                              \
+              IrOpcode::kCheckedLoad,                                        \
+              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,  \
+              "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {}       \
+  };                                                                         \
+  Load##Type##Operator kLoad##Type;                                          \
   CheckedLoad##Type##Operator kCheckedLoad##Type;
   MACHINE_TYPE_LIST(LOAD)
 #undef LOAD
 
-#define STACKSLOT(Type)                                                       \
-  struct StackSlot##Type##Operator final                                      \
-      : public Operator1<MachineRepresentation> {                             \
-    StackSlot##Type##Operator()                                               \
-        : Operator1<MachineRepresentation>(                                   \
-              IrOpcode::kStackSlot, Operator::kNoThrow, "StackSlot", 0, 0, 0, \
-              1, 0, 0, MachineType::Type().representation()) {}               \
-  };                                                                          \
+#define STACKSLOT(Type)                                                      \
+  struct StackSlot##Type##Operator final                                     \
+      : public Operator1<MachineRepresentation> {                            \
+    StackSlot##Type##Operator()                                              \
+        : Operator1<MachineRepresentation>(                                  \
+              IrOpcode::kStackSlot, Operator::kNoDeopt | Operator::kNoThrow, \
+              "StackSlot", 0, 0, 0, 1, 0, 0,                                 \
+              MachineType::Type().representation()) {}                       \
+  };                                                                         \
   StackSlot##Type##Operator kStackSlot##Type;
   MACHINE_TYPE_LIST(STACKSLOT)
 #undef STACKSLOT
@@ -461,7 +491,8 @@
   struct Store##Type##Operator : public Operator1<StoreRepresentation> {       \
     explicit Store##Type##Operator(WriteBarrierKind write_barrier_kind)        \
         : Operator1<StoreRepresentation>(                                      \
-              IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow,        \
+              IrOpcode::kStore,                                                \
+              Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,     \
               "Store", 3, 1, 1, 0, 1, 0,                                       \
               StoreRepresentation(MachineRepresentation::Type,                 \
                                   write_barrier_kind)) {}                      \
@@ -490,7 +521,8 @@
       : public Operator1<CheckedStoreRepresentation> {                         \
     CheckedStore##Type##Operator()                                             \
         : Operator1<CheckedStoreRepresentation>(                               \
-              IrOpcode::kCheckedStore, Operator::kNoRead | Operator::kNoThrow, \
+              IrOpcode::kCheckedStore,                                         \
+              Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,     \
               "CheckedStore", 4, 1, 1, 0, 1, 0, MachineRepresentation::Type) { \
     }                                                                          \
   };                                                                           \
@@ -503,14 +535,15 @@
   MACHINE_REPRESENTATION_LIST(STORE)
 #undef STORE
 
-#define ATOMIC_LOAD(Type)                                                     \
-  struct AtomicLoad##Type##Operator final                                     \
-      : public Operator1<LoadRepresentation> {                                \
-    AtomicLoad##Type##Operator()                                              \
-        : Operator1<LoadRepresentation>(                                      \
-              IrOpcode::kAtomicLoad, Operator::kNoThrow | Operator::kNoWrite, \
-              "AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {}         \
-  };                                                                          \
+#define ATOMIC_LOAD(Type)                                                   \
+  struct AtomicLoad##Type##Operator final                                   \
+      : public Operator1<LoadRepresentation> {                              \
+    AtomicLoad##Type##Operator()                                            \
+        : Operator1<LoadRepresentation>(                                    \
+              IrOpcode::kAtomicLoad,                                        \
+              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+              "AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {}       \
+  };                                                                        \
   AtomicLoad##Type##Operator kAtomicLoad##Type;
   ATOMIC_TYPE_LIST(ATOMIC_LOAD)
 #undef ATOMIC_LOAD
@@ -520,23 +553,39 @@
       : public Operator1<MachineRepresentation> {                              \
     AtomicStore##Type##Operator()                                              \
         : Operator1<MachineRepresentation>(                                    \
-              IrOpcode::kAtomicStore, Operator::kNoRead | Operator::kNoThrow,  \
+              IrOpcode::kAtomicStore,                                          \
+              Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,     \
               "AtomicStore", 3, 1, 1, 0, 1, 0, MachineRepresentation::Type) {} \
   };                                                                           \
   AtomicStore##Type##Operator kAtomicStore##Type;
   ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
 #undef STORE
+
+  struct DebugBreakOperator : public Operator {
+    DebugBreakOperator()
+        : Operator(IrOpcode::kDebugBreak, Operator::kNoThrow, "DebugBreak", 0,
+                   0, 0, 0, 0, 0) {}
+  };
+  DebugBreakOperator kDebugBreak;
 };
 
+struct CommentOperator : public Operator1<const char*> {
+  explicit CommentOperator(const char* msg)
+      : Operator1<const char*>(IrOpcode::kComment, Operator::kNoThrow,
+                               "Comment", 0, 0, 0, 0, 0, 0, msg) {}
+};
 
 static base::LazyInstance<MachineOperatorGlobalCache>::type kCache =
     LAZY_INSTANCE_INITIALIZER;
 
-
-MachineOperatorBuilder::MachineOperatorBuilder(Zone* zone,
-                                               MachineRepresentation word,
-                                               Flags flags)
-    : cache_(kCache.Get()), word_(word), flags_(flags) {
+MachineOperatorBuilder::MachineOperatorBuilder(
+    Zone* zone, MachineRepresentation word, Flags flags,
+    AlignmentRequirements alignmentRequirements)
+    : zone_(zone),
+      cache_(kCache.Get()),
+      word_(word),
+      flags_(flags),
+      alignment_requirements_(alignmentRequirements) {
   DCHECK(word == MachineRepresentation::kWord32 ||
          word == MachineRepresentation::kWord64);
 }
@@ -556,6 +605,10 @@
 PURE_OPTIONAL_OP_LIST(PURE)
 #undef PURE
 
+#define OVERFLOW_OP(Name, properties) \
+  const Operator* MachineOperatorBuilder::Name() { return &cache_.k##Name; }
+OVERFLOW_OP_LIST(OVERFLOW_OP)
+#undef OVERFLOW_OP
 
 const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
 #define LOAD(Type)                  \
@@ -604,6 +657,13 @@
   return nullptr;
 }
 
+const Operator* MachineOperatorBuilder::DebugBreak() {
+  return &cache_.kDebugBreak;
+}
+
+const Operator* MachineOperatorBuilder::Comment(const char* msg) {
+  return new (zone_) CommentOperator(msg);
+}
 
 const Operator* MachineOperatorBuilder::CheckedLoad(
     CheckedLoadRepresentation rep) {
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index 814f6c9..7c443f4 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -113,20 +113,89 @@
     kWord64Popcnt = 1u << 19,
     kWord32ReverseBits = 1u << 20,
     kWord64ReverseBits = 1u << 21,
-    kAllOptionalOps = kFloat32Max | kFloat32Min | kFloat64Max | kFloat64Min |
-                      kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
-                      kFloat64RoundUp | kFloat32RoundTruncate |
-                      kFloat64RoundTruncate | kFloat64RoundTiesAway |
-                      kFloat32RoundTiesEven | kFloat64RoundTiesEven |
-                      kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
-                      kWord32ReverseBits | kWord64ReverseBits
+    kFloat32Neg = 1u << 22,
+    kFloat64Neg = 1u << 23,
+    kAllOptionalOps =
+        kFloat32Max | kFloat32Min | kFloat64Max | kFloat64Min |
+        kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
+        kFloat64RoundUp | kFloat32RoundTruncate | kFloat64RoundTruncate |
+        kFloat64RoundTiesAway | kFloat32RoundTiesEven | kFloat64RoundTiesEven |
+        kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
+        kWord32ReverseBits | kWord64ReverseBits | kFloat32Neg | kFloat64Neg
   };
   typedef base::Flags<Flag, unsigned> Flags;
 
+  class AlignmentRequirements {
+   public:
+    enum UnalignedAccessSupport { kNoSupport, kSomeSupport, kFullSupport };
+
+    bool IsUnalignedLoadSupported(const MachineType& machineType,
+                                  uint8_t alignment) const {
+      return IsUnalignedSupported(unalignedLoadSupportedTypes_, machineType,
+                                  alignment);
+    }
+
+    bool IsUnalignedStoreSupported(const MachineType& machineType,
+                                   uint8_t alignment) const {
+      return IsUnalignedSupported(unalignedStoreSupportedTypes_, machineType,
+                                  alignment);
+    }
+
+    static AlignmentRequirements FullUnalignedAccessSupport() {
+      return AlignmentRequirements(kFullSupport);
+    }
+    static AlignmentRequirements NoUnalignedAccessSupport() {
+      return AlignmentRequirements(kNoSupport);
+    }
+    static AlignmentRequirements SomeUnalignedAccessSupport(
+        const Vector<MachineType>& unalignedLoadSupportedTypes,
+        const Vector<MachineType>& unalignedStoreSupportedTypes) {
+      return AlignmentRequirements(kSomeSupport, unalignedLoadSupportedTypes,
+                                   unalignedStoreSupportedTypes);
+    }
+
+   private:
+    explicit AlignmentRequirements(
+        AlignmentRequirements::UnalignedAccessSupport unalignedAccessSupport,
+        Vector<MachineType> unalignedLoadSupportedTypes =
+            Vector<MachineType>(NULL, 0),
+        Vector<MachineType> unalignedStoreSupportedTypes =
+            Vector<MachineType>(NULL, 0))
+        : unalignedSupport_(unalignedAccessSupport),
+          unalignedLoadSupportedTypes_(unalignedLoadSupportedTypes),
+          unalignedStoreSupportedTypes_(unalignedStoreSupportedTypes) {}
+
+    bool IsUnalignedSupported(const Vector<MachineType>& supported,
+                              const MachineType& machineType,
+                              uint8_t alignment) const {
+      if (unalignedSupport_ == kFullSupport) {
+        return true;
+      } else if (unalignedSupport_ == kNoSupport) {
+        return false;
+      } else {
+        for (MachineType m : supported) {
+          if (m == machineType) {
+            return true;
+          }
+        }
+        return false;
+      }
+    }
+
+    const AlignmentRequirements::UnalignedAccessSupport unalignedSupport_;
+    const Vector<MachineType> unalignedLoadSupportedTypes_;
+    const Vector<MachineType> unalignedStoreSupportedTypes_;
+  };
+
   explicit MachineOperatorBuilder(
       Zone* zone,
       MachineRepresentation word = MachineType::PointerRepresentation(),
-      Flags supportedOperators = kNoFlags);
+      Flags supportedOperators = kNoFlags,
+      AlignmentRequirements alignmentRequirements =
+          AlignmentRequirements::NoUnalignedAccessSupport());
+
+  const Operator* Comment(const char* msg);
+  const Operator* DebugBreak();
 
   const Operator* Word32And();
   const Operator* Word32Or();
@@ -295,12 +364,42 @@
   const OptionalOperator Float32RoundTiesEven();
   const OptionalOperator Float64RoundTiesEven();
 
+  // Floating point neg.
+  const OptionalOperator Float32Neg();
+  const OptionalOperator Float64Neg();
+
+  // Floating point inverse trigonometric and hyperbolic functions
+  const Operator* Float64Atan();
+  const Operator* Float64Atan2();
+  const Operator* Float64Atanh();
+
+  // Floating point trigonometric functions (double-precision).
+  const Operator* Float64Cos();
+  const Operator* Float64Sin();
+  const Operator* Float64Tan();
+
+  // Floating point exponential functions (double-precision).
+  const Operator* Float64Exp();
+
+  // Floating point logarithm (double-precision).
+  const Operator* Float64Log();
+  const Operator* Float64Log1p();
+  const Operator* Float64Log2();
+  const Operator* Float64Log10();
+
+  const Operator* Float64Cbrt();
+  const Operator* Float64Expm1();
+
   // Floating point bit representation.
   const Operator* Float64ExtractLowWord32();
   const Operator* Float64ExtractHighWord32();
   const Operator* Float64InsertLowWord32();
   const Operator* Float64InsertHighWord32();
 
+  // Change signalling NaN to quiet NaN.
+  // Identity for any input that is not signalling NaN.
+  const Operator* Float64SilenceNaN();
+
   // SIMD operators.
   const Operator* CreateFloat32x4();
   const Operator* Float32x4ExtractLane();
@@ -513,6 +612,18 @@
   bool Is64() const { return word() == MachineRepresentation::kWord64; }
   MachineRepresentation word() const { return word_; }
 
+  bool UnalignedLoadSupported(const MachineType& machineType,
+                              uint8_t alignment) {
+    return alignment_requirements_.IsUnalignedLoadSupported(machineType,
+                                                            alignment);
+  }
+
+  bool UnalignedStoreSupported(const MachineType& machineType,
+                               uint8_t alignment) {
+    return alignment_requirements_.IsUnalignedStoreSupported(machineType,
+                                                             alignment);
+  }
+
 // Pseudo operators that translate to 32/64-bit operators depending on the
 // word-size of the target machine assumed by this builder.
 #define PSEUDO_OP_LIST(V) \
@@ -544,9 +655,11 @@
 #undef PSEUDO_OP_LIST
 
  private:
+  Zone* zone_;
   MachineOperatorGlobalCache const& cache_;
   MachineRepresentation const word_;
   Flags const flags_;
+  AlignmentRequirements const alignment_requirements_;
 
   DISALLOW_COPY_AND_ASSIGN(MachineOperatorBuilder);
 };
diff --git a/src/compiler/memory-optimizer.cc b/src/compiler/memory-optimizer.cc
index 59fd899..8c66347 100644
--- a/src/compiler/memory-optimizer.cc
+++ b/src/compiler/memory-optimizer.cc
@@ -87,6 +87,8 @@
       return VisitStoreField(node, state);
     case IrOpcode::kCheckedLoad:
     case IrOpcode::kCheckedStore:
+    case IrOpcode::kDeoptimizeIf:
+    case IrOpcode::kDeoptimizeUnless:
     case IrOpcode::kIfException:
     case IrOpcode::kLoad:
     case IrOpcode::kStore:
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
index c437d5e..5e30e34 100644
--- a/src/compiler/mips/code-generator-mips.cc
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -485,6 +485,29 @@
     __ sync();                                           \
   } while (0)
 
+#define ASSEMBLE_IEEE754_BINOP(name)                                          \
+  do {                                                                        \
+    FrameScope scope(masm(), StackFrame::MANUAL);                             \
+    __ PrepareCallCFunction(0, 2, kScratchReg);                               \
+    __ MovToFloatParameters(i.InputDoubleRegister(0),                         \
+                            i.InputDoubleRegister(1));                        \
+    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+                     0, 2);                                                   \
+    /* Move the result in the double result register. */                      \
+    __ MovFromFloatResult(i.OutputDoubleRegister());                          \
+  } while (0)
+
+#define ASSEMBLE_IEEE754_UNOP(name)                                           \
+  do {                                                                        \
+    FrameScope scope(masm(), StackFrame::MANUAL);                             \
+    __ PrepareCallCFunction(0, 1, kScratchReg);                               \
+    __ MovToFloatParameter(i.InputDoubleRegister(0));                         \
+    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+                     0, 1);                                                   \
+    /* Move the result into the double result register. */                     \
+    __ MovFromFloatResult(i.OutputDoubleRegister());                          \
+  } while (0)
+
 void CodeGenerator::AssembleDeconstructFrame() {
   __ mov(sp, fp);
   __ Pop(ra, fp);
@@ -652,6 +675,14 @@
     case kArchTableSwitch:
       AssembleArchTableSwitch(instr);
       break;
+    case kArchDebugBreak:
+      __ stop("kArchDebugBreak");
+      break;
+    case kArchComment: {
+      Address comment_string = i.InputExternalReference(0).address();
+      __ RecordComment(reinterpret_cast<const char*>(comment_string));
+      break;
+    }
     case kArchNop:
     case kArchThrowTerminator:
       // don't emit code for nops.
@@ -710,6 +741,45 @@
               Operand(offset.offset()));
       break;
     }
+    case kIeee754Float64Atan:
+      ASSEMBLE_IEEE754_UNOP(atan);
+      break;
+    case kIeee754Float64Atan2:
+      ASSEMBLE_IEEE754_BINOP(atan2);
+      break;
+    case kIeee754Float64Cos:
+      ASSEMBLE_IEEE754_UNOP(cos);
+      break;
+    case kIeee754Float64Cbrt:
+      ASSEMBLE_IEEE754_UNOP(cbrt);
+      break;
+    case kIeee754Float64Exp:
+      ASSEMBLE_IEEE754_UNOP(exp);
+      break;
+    case kIeee754Float64Expm1:
+      ASSEMBLE_IEEE754_UNOP(expm1);
+      break;
+    case kIeee754Float64Atanh:
+      ASSEMBLE_IEEE754_UNOP(atanh);
+      break;
+    case kIeee754Float64Log:
+      ASSEMBLE_IEEE754_UNOP(log);
+      break;
+    case kIeee754Float64Log1p:
+      ASSEMBLE_IEEE754_UNOP(log1p);
+      break;
+    case kIeee754Float64Log10:
+      ASSEMBLE_IEEE754_UNOP(log10);
+      break;
+    case kIeee754Float64Log2:
+      ASSEMBLE_IEEE754_UNOP(log2);
+      break;
+    case kIeee754Float64Sin:
+      ASSEMBLE_IEEE754_UNOP(sin);
+      break;
+    case kIeee754Float64Tan:
+      ASSEMBLE_IEEE754_UNOP(tan);
+      break;
     case kMipsAdd:
       __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
       break;
@@ -938,6 +1008,11 @@
       __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1));
       break;
+    case kMipsSubPreserveNanS:
+      __ SubNanPreservePayloadAndSign_s(i.OutputDoubleRegister(),
+                                        i.InputDoubleRegister(0),
+                                        i.InputDoubleRegister(1));
+      break;
     case kMipsMulS:
       // TODO(plind): add special case: right op is -1.0, see arm port.
       __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1004,6 +1079,11 @@
       __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1));
       break;
+    case kMipsSubPreserveNanD:
+      __ SubNanPreservePayloadAndSign_d(i.OutputDoubleRegister(),
+                                        i.InputDoubleRegister(0),
+                                        i.InputDoubleRegister(1));
+      break;
     case kMipsMulD:
       // TODO(plind): add special case: right op is -1.0, see arm port.
       __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1233,6 +1313,20 @@
     case kMipsFloat64InsertHighWord32:
       __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
       break;
+    case kMipsFloat64SilenceNaN: {
+      FPURegister value = i.InputDoubleRegister(0);
+      FPURegister result = i.OutputDoubleRegister();
+      Register scratch0 = i.TempRegister(0);
+      Label is_nan, not_nan;
+      __ BranchF(NULL, &is_nan, eq, value, value);
+      __ Branch(&not_nan);
+      __ bind(&is_nan);
+      __ LoadRoot(scratch0, Heap::kNanValueRootIndex);
+      __ ldc1(result, FieldMemOperand(scratch0, HeapNumber::kValueOffset));
+      __ bind(&not_nan);
+      break;
+    }
+
     // ... more basic instructions ...
 
     case kMipsLbu:
@@ -1292,7 +1386,13 @@
     }
     case kMipsStoreToStackSlot: {
       if (instr->InputAt(0)->IsFPRegister()) {
-        __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+        LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+        if (op->representation() == MachineRepresentation::kFloat64) {
+          __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+        } else {
+          DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+          __ swc1(i.InputSingleRegister(0), MemOperand(sp, i.InputInt32(1)));
+        }
       } else {
         __ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
       }
@@ -1804,6 +1904,7 @@
       switch (src.type()) {
         case Constant::kInt32:
           if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
               src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
             __ li(dst, Operand(src.ToInt32(), src.rmode()));
           } else {
@@ -1872,7 +1973,13 @@
     DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
     MemOperand src = g.ToMemOperand(source);
     if (destination->IsFPRegister()) {
-      __ ldc1(g.ToDoubleRegister(destination), src);
+      LocationOperand* op = LocationOperand::cast(source);
+      if (op->representation() == MachineRepresentation::kFloat64) {
+        __ ldc1(g.ToDoubleRegister(destination), src);
+      } else {
+        DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+        __ lwc1(g.ToDoubleRegister(destination), src);
+      }
     } else {
       FPURegister temp = kScratchDoubleReg;
       __ ldc1(temp, src);
diff --git a/src/compiler/mips/instruction-codes-mips.h b/src/compiler/mips/instruction-codes-mips.h
index 5c36525..766a5b1 100644
--- a/src/compiler/mips/instruction-codes-mips.h
+++ b/src/compiler/mips/instruction-codes-mips.h
@@ -46,6 +46,7 @@
   V(MipsCmpS)                      \
   V(MipsAddS)                      \
   V(MipsSubS)                      \
+  V(MipsSubPreserveNanS)           \
   V(MipsMulS)                      \
   V(MipsDivS)                      \
   V(MipsModS)                      \
@@ -56,6 +57,7 @@
   V(MipsCmpD)                      \
   V(MipsAddD)                      \
   V(MipsSubD)                      \
+  V(MipsSubPreserveNanD)           \
   V(MipsMulD)                      \
   V(MipsDivD)                      \
   V(MipsModD)                      \
@@ -106,6 +108,7 @@
   V(MipsFloat64ExtractHighWord32)  \
   V(MipsFloat64InsertLowWord32)    \
   V(MipsFloat64InsertHighWord32)   \
+  V(MipsFloat64SilenceNaN)         \
   V(MipsFloat64Max)                \
   V(MipsFloat64Min)                \
   V(MipsFloat32Max)                \
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
index cccb39a..c95613e 100644
--- a/src/compiler/mips/instruction-selector-mips.cc
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -755,7 +755,7 @@
 }
 
 void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
-  VisitRRR(this, kMipsSubS, node);
+  VisitRRR(this, kMipsSubPreserveNanS, node);
 }
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
@@ -777,7 +777,7 @@
 }
 
 void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
-  VisitRRR(this, kMipsSubD, node);
+  VisitRRR(this, kMipsSubPreserveNanD, node);
 }
 
 void InstructionSelector::VisitFloat32Mul(Node* node) {
@@ -876,7 +876,6 @@
   VisitRR(this, kMipsAbsD, node);
 }
 
-
 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
   VisitRR(this, kMipsSqrtS, node);
 }
@@ -931,6 +930,24 @@
   VisitRR(this, kMipsFloat64RoundTiesEven, node);
 }
 
+void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+                                                   InstructionCode opcode) {
+  MipsOperandGenerator g(this);
+  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12),
+       g.UseFixed(node->InputAt(1), f14))
+      ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+                                                  InstructionCode opcode) {
+  MipsOperandGenerator g(this);
+  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12))
+      ->MarkAsCall();
+}
 
 void InstructionSelector::EmitPrepareArguments(
     ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1454,6 +1471,14 @@
        g.UseRegister(left), g.UseRegister(right));
 }
 
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+  MipsOperandGenerator g(this);
+  Node* left = node->InputAt(0);
+  InstructionOperand temps[] = {g.TempRegister()};
+  Emit(kMipsFloat64SilenceNaN, g.DefineSameAsFirst(node), g.UseRegister(left),
+       arraysize(temps), temps);
+}
+
 void InstructionSelector::VisitAtomicLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   MipsOperandGenerator g(this);
@@ -1548,6 +1573,20 @@
          MachineOperatorBuilder::kFloat32RoundTiesEven;
 }
 
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+  if (IsMipsArchVariant(kMips32r6)) {
+    return MachineOperatorBuilder::AlignmentRequirements::
+        FullUnalignedAccessSupport();
+  } else {
+    DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kMips32r2));
+    return MachineOperatorBuilder::AlignmentRequirements::
+        NoUnalignedAccessSupport();
+  }
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/mips64/code-generator-mips64.cc b/src/compiler/mips64/code-generator-mips64.cc
index a7d2301..9d4201f 100644
--- a/src/compiler/mips64/code-generator-mips64.cc
+++ b/src/compiler/mips64/code-generator-mips64.cc
@@ -496,6 +496,29 @@
     __ sync();                                           \
   } while (0)
 
+#define ASSEMBLE_IEEE754_BINOP(name)                                          \
+  do {                                                                        \
+    FrameScope scope(masm(), StackFrame::MANUAL);                             \
+    __ PrepareCallCFunction(0, 2, kScratchReg);                               \
+    __ MovToFloatParameters(i.InputDoubleRegister(0),                         \
+                            i.InputDoubleRegister(1));                        \
+    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+                     0, 2);                                                   \
+    /* Move the result into the double result register. */                     \
+    __ MovFromFloatResult(i.OutputDoubleRegister());                          \
+  } while (0)
+
+#define ASSEMBLE_IEEE754_UNOP(name)                                           \
+  do {                                                                        \
+    FrameScope scope(masm(), StackFrame::MANUAL);                             \
+    __ PrepareCallCFunction(0, 1, kScratchReg);                               \
+    __ MovToFloatParameter(i.InputDoubleRegister(0));                         \
+    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+                     0, 1);                                                   \
+    /* Move the result into the double result register. */                     \
+    __ MovFromFloatResult(i.OutputDoubleRegister());                          \
+  } while (0)
+
 void CodeGenerator::AssembleDeconstructFrame() {
   __ mov(sp, fp);
   __ Pop(ra, fp);
@@ -661,6 +684,14 @@
     case kArchTableSwitch:
       AssembleArchTableSwitch(instr);
       break;
+    case kArchDebugBreak:
+      __ stop("kArchDebugBreak");
+      break;
+    case kArchComment: {
+      Address comment_string = i.InputExternalReference(0).address();
+      __ RecordComment(reinterpret_cast<const char*>(comment_string));
+      break;
+    }
     case kArchNop:
     case kArchThrowTerminator:
       // don't emit code for nops.
@@ -719,6 +750,45 @@
                Operand(offset.offset()));
       break;
     }
+    case kIeee754Float64Atan:
+      ASSEMBLE_IEEE754_UNOP(atan);
+      break;
+    case kIeee754Float64Atan2:
+      ASSEMBLE_IEEE754_BINOP(atan2);
+      break;
+    case kIeee754Float64Atanh:
+      ASSEMBLE_IEEE754_UNOP(atanh);
+      break;
+    case kIeee754Float64Cos:
+      ASSEMBLE_IEEE754_UNOP(cos);
+      break;
+    case kIeee754Float64Cbrt:
+      ASSEMBLE_IEEE754_UNOP(cbrt);
+      break;
+    case kIeee754Float64Exp:
+      ASSEMBLE_IEEE754_UNOP(exp);
+      break;
+    case kIeee754Float64Expm1:
+      ASSEMBLE_IEEE754_UNOP(expm1);
+      break;
+    case kIeee754Float64Log:
+      ASSEMBLE_IEEE754_UNOP(log);
+      break;
+    case kIeee754Float64Log1p:
+      ASSEMBLE_IEEE754_UNOP(log1p);
+      break;
+    case kIeee754Float64Log2:
+      ASSEMBLE_IEEE754_UNOP(log2);
+      break;
+    case kIeee754Float64Log10:
+      ASSEMBLE_IEEE754_UNOP(log10);
+      break;
+    case kIeee754Float64Sin:
+      ASSEMBLE_IEEE754_UNOP(sin);
+      break;
+    case kIeee754Float64Tan:
+      ASSEMBLE_IEEE754_UNOP(tan);
+      break;
     case kMips64Add:
       __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
       break;
@@ -1102,6 +1172,11 @@
       __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1));
       break;
+    case kMips64SubPreserveNanS:
+      __ SubNanPreservePayloadAndSign_s(i.OutputDoubleRegister(),
+                                        i.InputDoubleRegister(0),
+                                        i.InputDoubleRegister(1));
+      break;
     case kMips64MulS:
       // TODO(plind): add special case: right op is -1.0, see arm port.
       __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1152,6 +1227,11 @@
       __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1));
       break;
+    case kMips64SubPreserveNanD:
+      __ SubNanPreservePayloadAndSign_d(i.OutputDoubleRegister(),
+                                        i.InputDoubleRegister(0),
+                                        i.InputDoubleRegister(1));
+      break;
     case kMips64MulD:
       // TODO(plind): add special case: right op is -1.0, see arm port.
       __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1249,6 +1329,9 @@
       }
       break;
     }
+    case kMips64Float64SilenceNaN:
+      __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
     case kMips64Float32Max: {
       // (b < a) ? a : b
       if (kArchVariant == kMips64r6) {
@@ -2079,7 +2162,8 @@
           __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
           break;
         case Constant::kInt64:
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
             __ li(dst, Operand(src.ToInt64(), src.rmode()));
           } else {
             DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
diff --git a/src/compiler/mips64/instruction-codes-mips64.h b/src/compiler/mips64/instruction-codes-mips64.h
index 6fd321e..67c84f1 100644
--- a/src/compiler/mips64/instruction-codes-mips64.h
+++ b/src/compiler/mips64/instruction-codes-mips64.h
@@ -61,6 +61,7 @@
   V(Mips64CmpS)                     \
   V(Mips64AddS)                     \
   V(Mips64SubS)                     \
+  V(Mips64SubPreserveNanS)          \
   V(Mips64MulS)                     \
   V(Mips64DivS)                     \
   V(Mips64ModS)                     \
@@ -71,6 +72,7 @@
   V(Mips64CmpD)                     \
   V(Mips64AddD)                     \
   V(Mips64SubD)                     \
+  V(Mips64SubPreserveNanD)          \
   V(Mips64MulD)                     \
   V(Mips64DivD)                     \
   V(Mips64ModD)                     \
@@ -133,6 +135,7 @@
   V(Mips64Float64InsertHighWord32)  \
   V(Mips64Float64Max)               \
   V(Mips64Float64Min)               \
+  V(Mips64Float64SilenceNaN)        \
   V(Mips64Float32Max)               \
   V(Mips64Float32Min)               \
   V(Mips64Push)                     \
diff --git a/src/compiler/mips64/instruction-selector-mips64.cc b/src/compiler/mips64/instruction-selector-mips64.cc
index 3516e76..3e1f98e 100644
--- a/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/src/compiler/mips64/instruction-selector-mips64.cc
@@ -1160,7 +1160,7 @@
 }
 
 void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
-  VisitRRR(this, kMips64SubS, node);
+  VisitRRR(this, kMips64SubPreserveNanS, node);
 }
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
@@ -1182,7 +1182,7 @@
 }
 
 void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
-  VisitRRR(this, kMips64SubD, node);
+  VisitRRR(this, kMips64SubPreserveNanD, node);
 }
 
 void InstructionSelector::VisitFloat32Mul(Node* node) {
@@ -1282,7 +1282,6 @@
   VisitRR(this, kMips64AbsD, node);
 }
 
-
 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
   VisitRR(this, kMips64SqrtS, node);
 }
@@ -1337,6 +1336,24 @@
   VisitRR(this, kMips64Float64RoundTiesEven, node);
 }
 
+void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+                                                   InstructionCode opcode) {
+  Mips64OperandGenerator g(this);
+  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12),
+       g.UseFixed(node->InputAt(1), f14))
+      ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+                                                  InstructionCode opcode) {
+  Mips64OperandGenerator g(this);
+  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12))
+      ->MarkAsCall();
+}
 
 void InstructionSelector::EmitPrepareArguments(
     ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1947,6 +1964,9 @@
   VisitRR(this, kMips64Float64ExtractHighWord32, node);
 }
 
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+  VisitRR(this, kMips64Float64SilenceNaN, node);
+}
 
 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
   Mips64OperandGenerator g(this);
@@ -2057,6 +2077,19 @@
          MachineOperatorBuilder::kFloat32RoundTiesEven;
 }
 
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+  if (kArchVariant == kMips64r6) {
+    return MachineOperatorBuilder::AlignmentRequirements::
+        FullUnalignedAccessSupport();
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    return MachineOperatorBuilder::AlignmentRequirements::
+        NoUnalignedAccessSupport();
+  }
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/move-optimizer.cc b/src/compiler/move-optimizer.cc
index 477f139..4753d15 100644
--- a/src/compiler/move-optimizer.cc
+++ b/src/compiler/move-optimizer.cc
@@ -24,16 +24,38 @@
   }
 };
 
-struct OperandCompare {
-  bool operator()(const InstructionOperand& a,
-                  const InstructionOperand& b) const {
-    return a.CompareCanonicalized(b);
-  }
-};
-
 typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap;
 typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;
 
+bool Blocks(const OperandSet& set, const InstructionOperand& operand) {
+  if (set.find(operand) != set.end()) return true;
+  // Only FP registers on archs with non-simple aliasing need extra checks.
+  if (!operand.IsFPRegister() || kSimpleFPAliasing) return false;
+
+  const LocationOperand& loc = LocationOperand::cast(operand);
+  MachineRepresentation rep = loc.representation();
+  MachineRepresentation other_fp_rep = rep == MachineRepresentation::kFloat64
+                                           ? MachineRepresentation::kFloat32
+                                           : MachineRepresentation::kFloat64;
+  const RegisterConfiguration* config = RegisterConfiguration::Turbofan();
+  if (config->fp_aliasing_kind() != RegisterConfiguration::COMBINE) {
+    // Overlap aliasing case.
+    return set.find(LocationOperand(loc.kind(), loc.location_kind(),
+                                    other_fp_rep, loc.register_code())) !=
+           set.end();
+  }
+  // Combine aliasing case.
+  int alias_base_index = -1;
+  int aliases = config->GetAliases(rep, loc.register_code(), other_fp_rep,
+                                   &alias_base_index);
+  while (aliases--) {
+    int aliased_reg = alias_base_index + aliases;
+    if (set.find(LocationOperand(loc.kind(), loc.location_kind(), other_fp_rep,
+                                 aliased_reg)) != set.end())
+      return true;
+  }
+  return false;
+}
 
 int FindFirstNonEmptySlot(const Instruction* instr) {
   int i = Instruction::FIRST_GAP_POSITION;
@@ -138,8 +160,8 @@
   ParallelMove* from_moves = from->parallel_moves()[0];
   if (from_moves == nullptr || from_moves->empty()) return;
 
-  ZoneSet<InstructionOperand, OperandCompare> dst_cant_be(local_zone());
-  ZoneSet<InstructionOperand, OperandCompare> src_cant_be(local_zone());
+  OperandSet dst_cant_be(local_zone());
+  OperandSet src_cant_be(local_zone());
 
   // If an operand is an input to the instruction, we cannot move assignments
   // where it appears on the LHS.
@@ -172,7 +194,7 @@
   // destination operands are eligible for being moved down.
   for (MoveOperands* move : *from_moves) {
     if (move->IsRedundant()) continue;
-    if (dst_cant_be.find(move->destination()) == dst_cant_be.end()) {
+    if (!Blocks(dst_cant_be, move->destination())) {
       MoveKey key = {move->source(), move->destination()};
       move_candidates.insert(key);
     }
@@ -187,7 +209,7 @@
       auto current = iter;
       ++iter;
       InstructionOperand src = current->source;
-      if (src_cant_be.find(src) != src_cant_be.end()) {
+      if (Blocks(src_cant_be, src)) {
         src_cant_be.insert(current->destination);
         move_candidates.erase(current);
         changed = true;
diff --git a/src/compiler/node-cache.cc b/src/compiler/node-cache.cc
index 79c342b..061a3ae 100644
--- a/src/compiler/node-cache.cc
+++ b/src/compiler/node-cache.cc
@@ -115,6 +115,9 @@
 template class NodeCache<int32_t>;
 template class NodeCache<int64_t>;
 
+template class NodeCache<RelocInt32Key>;
+template class NodeCache<RelocInt64Key>;
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/node-cache.h b/src/compiler/node-cache.h
index a8f9071..7063a3b 100644
--- a/src/compiler/node-cache.h
+++ b/src/compiler/node-cache.h
@@ -63,6 +63,14 @@
 // Various default cache types.
 typedef NodeCache<int32_t> Int32NodeCache;
 typedef NodeCache<int64_t> Int64NodeCache;
+
+// All we want is the numeric value of the RelocInfo::Mode enum. We typedef
+// below to avoid pulling in assembler.h
+typedef char RelocInfoMode;
+typedef std::pair<int32_t, RelocInfoMode> RelocInt32Key;
+typedef std::pair<int64_t, RelocInfoMode> RelocInt64Key;
+typedef NodeCache<RelocInt32Key> RelocInt32NodeCache;
+typedef NodeCache<RelocInt64Key> RelocInt64NodeCache;
 #if V8_HOST_ARCH_32_BIT
 typedef Int32NodeCache IntPtrNodeCache;
 #else
diff --git a/src/compiler/node-marker.h b/src/compiler/node-marker.h
index 5ef2063..84666d5 100644
--- a/src/compiler/node-marker.h
+++ b/src/compiler/node-marker.h
@@ -42,9 +42,22 @@
   DISALLOW_COPY_AND_ASSIGN(NodeMarkerBase);
 };
 
-
-// A NodeMarker uses monotonically increasing marks to assign local "states"
-// to nodes. Only one NodeMarker per graph is valid at a given time.
+// A NodeMarker assigns a local "state" to every node of a graph in constant
+// memory. Only one NodeMarker per graph is valid at a given time, that is,
+// after you create a NodeMarker you should no longer use NodeMarkers that
+// were created earlier. Internally, the local state is stored in the Node
+// structure.
+//
+// When you initialize a NodeMarker, all the local states are conceptually
+// set to State(0) in constant time.
+//
+// In its current implementation, in debug mode NodeMarker will try to
+// (efficiently) detect invalid use of an older NodeMarker. Namely, if you get
+// or set a node with a NodeMarker, and then get or set that node
+// with an older NodeMarker you will get a crash.
+//
+// GraphReducer uses a NodeMarker, so individual Reducers cannot use a
+// NodeMarker.
 template <typename State>
 class NodeMarker : public NodeMarkerBase {
  public:
diff --git a/src/compiler/node-properties.cc b/src/compiler/node-properties.cc
index 2cf899b..dc33d60 100644
--- a/src/compiler/node-properties.cc
+++ b/src/compiler/node-properties.cc
@@ -180,13 +180,6 @@
 
 
 // static
-void NodeProperties::RemoveFrameStateInput(Node* node, int index) {
-  DCHECK_LT(index, OperatorProperties::GetFrameStateInputCount(node->op()));
-  node->RemoveInput(FirstFrameStateIndex(node) + index);
-}
-
-
-// static
 void NodeProperties::RemoveNonValueInputs(Node* node) {
   node->TrimInputCount(node->op()->ValueInputCount());
 }
@@ -222,7 +215,8 @@
         DCHECK_NOT_NULL(exception);
         edge.UpdateTo(exception);
       } else {
-        UNREACHABLE();
+        DCHECK_NOT_NULL(success);
+        edge.UpdateTo(success);
       }
     } else if (IsEffectEdge(edge)) {
       DCHECK_NOT_NULL(effect);
@@ -243,6 +237,18 @@
 
 
 // static
+Node* NodeProperties::FindFrameStateBefore(Node* node) {
+  Node* effect = NodeProperties::GetEffectInput(node);
+  while (effect->opcode() != IrOpcode::kCheckpoint) {
+    if (effect->opcode() == IrOpcode::kDead) return effect;
+    DCHECK_EQ(1, effect->op()->EffectInputCount());
+    effect = NodeProperties::GetEffectInput(effect);
+  }
+  Node* frame_state = GetFrameStateInput(effect, 0);
+  return frame_state;
+}
+
+// static
 Node* NodeProperties::FindProjection(Node* node, size_t projection_index) {
   for (auto use : node->uses()) {
     if (use->opcode() == IrOpcode::kProjection &&
diff --git a/src/compiler/node-properties.h b/src/compiler/node-properties.h
index 78ffd1d..fbc06fc 100644
--- a/src/compiler/node-properties.h
+++ b/src/compiler/node-properties.h
@@ -84,7 +84,6 @@
   static void ReplaceControlInput(Node* node, Node* control, int index = 0);
   static void ReplaceEffectInput(Node* node, Node* effect, int index = 0);
   static void ReplaceFrameStateInput(Node* node, int index, Node* frame_state);
-  static void RemoveFrameStateInput(Node* node, int index);
   static void RemoveNonValueInputs(Node* node);
   static void RemoveValueInputs(Node* node);
 
@@ -109,6 +108,11 @@
   // ---------------------------------------------------------------------------
   // Miscellaneous utilities.
 
+  // Find the last frame state that is effect-wise before the given node. This
+  // assumes a linear effect-chain up to a {Checkpoint} node in the graph.
+  static Node* FindFrameStateBefore(Node* node);
+
+  // Collect the output-value projection for the given output index.
   static Node* FindProjection(Node* node, size_t projection_index);
 
   // Collect the branch-related projections from a node, such as IfTrue,
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
index ce5087c..c823afb 100644
--- a/src/compiler/opcodes.h
+++ b/src/compiler/opcodes.h
@@ -47,7 +47,7 @@
   V(Select)              \
   V(Phi)                 \
   V(EffectPhi)           \
-  V(CheckPoint)          \
+  V(Checkpoint)          \
   V(BeginRegion)         \
   V(FinishRegion)        \
   V(FrameState)          \
@@ -140,17 +140,20 @@
   V(JSCreateModuleContext)    \
   V(JSCreateScriptContext)
 
-#define JS_OTHER_OP_LIST(V) \
-  V(JSCallConstruct)        \
-  V(JSCallFunction)         \
-  V(JSCallRuntime)          \
-  V(JSConvertReceiver)      \
-  V(JSForInDone)            \
-  V(JSForInNext)            \
-  V(JSForInPrepare)         \
-  V(JSForInStep)            \
-  V(JSLoadMessage)          \
-  V(JSStoreMessage)         \
+#define JS_OTHER_OP_LIST(V)         \
+  V(JSCallConstruct)                \
+  V(JSCallFunction)                 \
+  V(JSCallRuntime)                  \
+  V(JSConvertReceiver)              \
+  V(JSForInDone)                    \
+  V(JSForInNext)                    \
+  V(JSForInPrepare)                 \
+  V(JSForInStep)                    \
+  V(JSLoadMessage)                  \
+  V(JSStoreMessage)                 \
+  V(JSGeneratorStore)               \
+  V(JSGeneratorRestoreContinuation) \
+  V(JSGeneratorRestoreRegister)     \
   V(JSStackCheck)
 
 #define JS_OP_LIST(V)     \
@@ -170,55 +173,95 @@
   V(StringLessThan)                      \
   V(StringLessThanOrEqual)
 
-#define SIMPLIFIED_OP_LIST(V)      \
-  SIMPLIFIED_COMPARE_BINOP_LIST(V) \
-  V(BooleanNot)                    \
-  V(BooleanToNumber)               \
-  V(NumberAdd)                     \
-  V(NumberSubtract)                \
-  V(NumberMultiply)                \
-  V(NumberDivide)                  \
-  V(NumberModulus)                 \
-  V(NumberBitwiseOr)               \
-  V(NumberBitwiseXor)              \
-  V(NumberBitwiseAnd)              \
-  V(NumberShiftLeft)               \
-  V(NumberShiftRight)              \
-  V(NumberShiftRightLogical)       \
-  V(NumberImul)                    \
-  V(NumberClz32)                   \
-  V(NumberCeil)                    \
-  V(NumberFloor)                   \
-  V(NumberRound)                   \
-  V(NumberTrunc)                   \
-  V(NumberToInt32)                 \
-  V(NumberToUint32)                \
-  V(NumberIsHoleNaN)               \
-  V(StringToNumber)                \
-  V(ChangeTaggedSignedToInt32)     \
-  V(ChangeTaggedToInt32)           \
-  V(ChangeTaggedToUint32)          \
-  V(ChangeTaggedToFloat64)         \
-  V(ChangeInt31ToTaggedSigned)     \
-  V(ChangeInt32ToTagged)           \
-  V(ChangeUint32ToTagged)          \
-  V(ChangeFloat64ToTagged)         \
-  V(ChangeTaggedToBit)             \
-  V(ChangeBitToTagged)             \
-  V(TruncateTaggedToWord32)        \
-  V(Allocate)                      \
-  V(LoadField)                     \
-  V(LoadBuffer)                    \
-  V(LoadElement)                   \
-  V(StoreField)                    \
-  V(StoreBuffer)                   \
-  V(StoreElement)                  \
-  V(ObjectIsCallable)              \
-  V(ObjectIsNumber)                \
-  V(ObjectIsReceiver)              \
-  V(ObjectIsSmi)                   \
-  V(ObjectIsString)                \
-  V(ObjectIsUndetectable)          \
+#define SIMPLIFIED_OP_LIST(V)         \
+  SIMPLIFIED_COMPARE_BINOP_LIST(V)    \
+  V(PlainPrimitiveToNumber)           \
+  V(PlainPrimitiveToWord32)           \
+  V(PlainPrimitiveToFloat64)          \
+  V(BooleanNot)                       \
+  V(BooleanToNumber)                  \
+  V(SpeculativeNumberAdd)             \
+  V(SpeculativeNumberSubtract)        \
+  V(SpeculativeNumberMultiply)        \
+  V(SpeculativeNumberDivide)          \
+  V(SpeculativeNumberModulus)         \
+  V(SpeculativeNumberEqual)           \
+  V(SpeculativeNumberLessThan)        \
+  V(SpeculativeNumberLessThanOrEqual) \
+  V(NumberAdd)                        \
+  V(NumberSubtract)                   \
+  V(NumberMultiply)                   \
+  V(NumberDivide)                     \
+  V(NumberModulus)                    \
+  V(NumberBitwiseOr)                  \
+  V(NumberBitwiseXor)                 \
+  V(NumberBitwiseAnd)                 \
+  V(NumberShiftLeft)                  \
+  V(NumberShiftRight)                 \
+  V(NumberShiftRightLogical)          \
+  V(NumberImul)                       \
+  V(NumberAbs)                        \
+  V(NumberClz32)                      \
+  V(NumberCeil)                       \
+  V(NumberCos)                        \
+  V(NumberFloor)                      \
+  V(NumberFround)                     \
+  V(NumberAtan)                       \
+  V(NumberAtan2)                      \
+  V(NumberAtanh)                      \
+  V(NumberExp)                        \
+  V(NumberExpm1)                      \
+  V(NumberLog)                        \
+  V(NumberLog1p)                      \
+  V(NumberLog2)                       \
+  V(NumberLog10)                      \
+  V(NumberCbrt)                       \
+  V(NumberRound)                      \
+  V(NumberSin)                        \
+  V(NumberSqrt)                       \
+  V(NumberTan)                        \
+  V(NumberTrunc)                      \
+  V(NumberToInt32)                    \
+  V(NumberToUint32)                   \
+  V(NumberSilenceNaN)                 \
+  V(StringFromCharCode)               \
+  V(StringToNumber)                   \
+  V(ChangeTaggedSignedToInt32)        \
+  V(ChangeTaggedToInt32)              \
+  V(ChangeTaggedToUint32)             \
+  V(ChangeTaggedToFloat64)            \
+  V(ChangeInt31ToTaggedSigned)        \
+  V(ChangeInt32ToTagged)              \
+  V(ChangeUint32ToTagged)             \
+  V(ChangeFloat64ToTagged)            \
+  V(ChangeTaggedToBit)                \
+  V(ChangeBitToTagged)                \
+  V(CheckBounds)                      \
+  V(CheckTaggedPointer)               \
+  V(CheckTaggedSigned)                \
+  V(CheckedInt32Add)                  \
+  V(CheckedInt32Sub)                  \
+  V(CheckedUint32ToInt32)             \
+  V(CheckedFloat64ToInt32)            \
+  V(CheckedTaggedToInt32)             \
+  V(CheckedTaggedToFloat64)           \
+  V(CheckFloat64Hole)                 \
+  V(CheckTaggedHole)                  \
+  V(TruncateTaggedToWord32)           \
+  V(TruncateTaggedToFloat64)          \
+  V(Allocate)                         \
+  V(LoadField)                        \
+  V(LoadBuffer)                       \
+  V(LoadElement)                      \
+  V(StoreField)                       \
+  V(StoreBuffer)                      \
+  V(StoreElement)                     \
+  V(ObjectIsCallable)                 \
+  V(ObjectIsNumber)                   \
+  V(ObjectIsReceiver)                 \
+  V(ObjectIsSmi)                      \
+  V(ObjectIsString)                   \
+  V(ObjectIsUndetectable)             \
   V(TypeGuard)
 
 // Opcodes for Machine-level operators.
@@ -242,6 +285,8 @@
 
 #define MACHINE_OP_LIST(V)      \
   MACHINE_COMPARE_BINOP_LIST(V) \
+  V(DebugBreak)                 \
+  V(Comment)                    \
   V(Load)                       \
   V(Store)                      \
   V(StackSlot)                  \
@@ -292,6 +337,7 @@
   V(ChangeFloat32ToFloat64)     \
   V(ChangeFloat64ToInt32)       \
   V(ChangeFloat64ToUint32)      \
+  V(Float64SilenceNaN)          \
   V(TruncateFloat64ToUint32)    \
   V(TruncateFloat32ToInt32)     \
   V(TruncateFloat32ToUint32)    \
@@ -319,6 +365,7 @@
   V(Float32Add)                 \
   V(Float32Sub)                 \
   V(Float32SubPreserveNan)      \
+  V(Float32Neg)                 \
   V(Float32Mul)                 \
   V(Float32Div)                 \
   V(Float32Max)                 \
@@ -329,13 +376,27 @@
   V(Float64Add)                 \
   V(Float64Sub)                 \
   V(Float64SubPreserveNan)      \
+  V(Float64Neg)                 \
   V(Float64Mul)                 \
   V(Float64Div)                 \
   V(Float64Mod)                 \
   V(Float64Max)                 \
   V(Float64Min)                 \
   V(Float64Abs)                 \
+  V(Float64Atan)                \
+  V(Float64Atan2)               \
+  V(Float64Atanh)               \
+  V(Float64Cbrt)                \
+  V(Float64Cos)                 \
+  V(Float64Exp)                 \
+  V(Float64Expm1)               \
+  V(Float64Log)                 \
+  V(Float64Log1p)               \
+  V(Float64Log10)               \
+  V(Float64Log2)                \
+  V(Float64Sin)                 \
   V(Float64Sqrt)                \
+  V(Float64Tan)                 \
   V(Float64RoundDown)           \
   V(Float32RoundUp)             \
   V(Float64RoundUp)             \
@@ -509,19 +570,7 @@
   V(Bool8x16Swizzle)                        \
   V(Bool8x16Shuffle)                        \
   V(Bool8x16Equal)                          \
-  V(Bool8x16NotEqual)                       \
-  V(Simd128Load)                            \
-  V(Simd128Load1)                           \
-  V(Simd128Load2)                           \
-  V(Simd128Load3)                           \
-  V(Simd128Store)                           \
-  V(Simd128Store1)                          \
-  V(Simd128Store2)                          \
-  V(Simd128Store3)                          \
-  V(Simd128And)                             \
-  V(Simd128Or)                              \
-  V(Simd128Xor)                             \
-  V(Simd128Not)
+  V(Bool8x16NotEqual)
 
 #define MACHINE_SIMD_RETURN_NUM_OP_LIST(V) \
   V(Float32x4ExtractLane)                  \
@@ -540,10 +589,25 @@
   V(Bool8x16AnyTrue)                        \
   V(Bool8x16AllTrue)
 
+#define MACHINE_SIMD_GENERIC_OP_LIST(V) \
+  V(Simd128Load)                        \
+  V(Simd128Load1)                       \
+  V(Simd128Load2)                       \
+  V(Simd128Load3)                       \
+  V(Simd128Store)                       \
+  V(Simd128Store1)                      \
+  V(Simd128Store2)                      \
+  V(Simd128Store3)                      \
+  V(Simd128And)                         \
+  V(Simd128Or)                          \
+  V(Simd128Xor)                         \
+  V(Simd128Not)
+
 #define MACHINE_SIMD_OP_LIST(V)       \
   MACHINE_SIMD_RETURN_SIMD_OP_LIST(V) \
   MACHINE_SIMD_RETURN_NUM_OP_LIST(V)  \
-  MACHINE_SIMD_RETURN_BOOL_OP_LIST(V)
+  MACHINE_SIMD_RETURN_BOOL_OP_LIST(V) \
+  MACHINE_SIMD_GENERIC_OP_LIST(V)
 
 #define VALUE_OP_LIST(V)  \
   COMMON_OP_LIST(V)       \
diff --git a/src/compiler/operation-typer.cc b/src/compiler/operation-typer.cc
new file mode 100644
index 0000000..b2860e0
--- /dev/null
+++ b/src/compiler/operation-typer.cc
@@ -0,0 +1,424 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/operation-typer.h"
+
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/type-cache.h"
+#include "src/types.h"
+
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OperationTyper::OperationTyper(Isolate* isolate, Zone* zone)
+    : zone_(zone), cache_(TypeCache::Get()) {
+  Factory* factory = isolate->factory();
+  singleton_false_ = Type::Constant(factory->false_value(), zone);
+  singleton_true_ = Type::Constant(factory->true_value(), zone);
+  singleton_the_hole_ = Type::Constant(factory->the_hole_value(), zone);
+}
+
+Type* OperationTyper::Merge(Type* left, Type* right) {
+  return Type::Union(left, right, zone());
+}
+
+Type* OperationTyper::WeakenRange(Type* previous_range, Type* current_range) {
+  static const double kWeakenMinLimits[] = {0.0,
+                                            -1073741824.0,
+                                            -2147483648.0,
+                                            -4294967296.0,
+                                            -8589934592.0,
+                                            -17179869184.0,
+                                            -34359738368.0,
+                                            -68719476736.0,
+                                            -137438953472.0,
+                                            -274877906944.0,
+                                            -549755813888.0,
+                                            -1099511627776.0,
+                                            -2199023255552.0,
+                                            -4398046511104.0,
+                                            -8796093022208.0,
+                                            -17592186044416.0,
+                                            -35184372088832.0,
+                                            -70368744177664.0,
+                                            -140737488355328.0,
+                                            -281474976710656.0,
+                                            -562949953421312.0};
+  static const double kWeakenMaxLimits[] = {0.0,
+                                            1073741823.0,
+                                            2147483647.0,
+                                            4294967295.0,
+                                            8589934591.0,
+                                            17179869183.0,
+                                            34359738367.0,
+                                            68719476735.0,
+                                            137438953471.0,
+                                            274877906943.0,
+                                            549755813887.0,
+                                            1099511627775.0,
+                                            2199023255551.0,
+                                            4398046511103.0,
+                                            8796093022207.0,
+                                            17592186044415.0,
+                                            35184372088831.0,
+                                            70368744177663.0,
+                                            140737488355327.0,
+                                            281474976710655.0,
+                                            562949953421311.0};
+  STATIC_ASSERT(arraysize(kWeakenMinLimits) == arraysize(kWeakenMaxLimits));
+
+  double current_min = current_range->Min();
+  double new_min = current_min;
+  // Find the closest lower entry in the list of allowed
+  // minima (or negative infinity if there is no such entry).
+  if (current_min != previous_range->Min()) {
+    new_min = -V8_INFINITY;
+    for (double const min : kWeakenMinLimits) {
+      if (min <= current_min) {
+        new_min = min;
+        break;
+      }
+    }
+  }
+
+  double current_max = current_range->Max();
+  double new_max = current_max;
+  // Find the closest greater entry in the list of allowed
+  // maxima (or infinity if there is no such entry).
+  if (current_max != previous_range->Max()) {
+    new_max = V8_INFINITY;
+    for (double const max : kWeakenMaxLimits) {
+      if (max >= current_max) {
+        new_max = max;
+        break;
+      }
+    }
+  }
+
+  return Type::Range(new_min, new_max, zone());
+}
+
+Type* OperationTyper::Rangify(Type* type) {
+  if (type->IsRange()) return type;  // Shortcut.
+  if (!type->Is(cache_.kInteger)) {
+    return type;  // Give up on non-integer types.
+  }
+  double min = type->Min();
+  double max = type->Max();
+  // Handle the degenerate case of empty bitset types (such as
+  // OtherUnsigned31 and OtherSigned32 on 64-bit architectures).
+  if (std::isnan(min)) {
+    DCHECK(std::isnan(max));
+    return type;
+  }
+  return Type::Range(min, max, zone());
+}
+
+namespace {
+
+// Returns the array's least element, ignoring NaN.
+// There must be at least one non-NaN element.
+// Any -0 is converted to 0.
+double array_min(double a[], size_t n) {
+  DCHECK(n != 0);
+  double x = +V8_INFINITY;
+  for (size_t i = 0; i < n; ++i) {
+    if (!std::isnan(a[i])) {
+      x = std::min(a[i], x);
+    }
+  }
+  DCHECK(!std::isnan(x));
+  return x == 0 ? 0 : x;  // -0 -> 0
+}
+
+// Returns the array's greatest element, ignoring NaN.
+// There must be at least one non-NaN element.
+// Any -0 is converted to 0.
+double array_max(double a[], size_t n) {
+  DCHECK(n != 0);
+  double x = -V8_INFINITY;
+  for (size_t i = 0; i < n; ++i) {
+    if (!std::isnan(a[i])) {
+      x = std::max(a[i], x);
+    }
+  }
+  DCHECK(!std::isnan(x));
+  return x == 0 ? 0 : x;  // -0 -> 0
+}
+
+}  // namespace
+
+Type* OperationTyper::AddRanger(double lhs_min, double lhs_max, double rhs_min,
+                                double rhs_max) {
+  double results[4];
+  results[0] = lhs_min + rhs_min;
+  results[1] = lhs_min + rhs_max;
+  results[2] = lhs_max + rhs_min;
+  results[3] = lhs_max + rhs_max;
+  // Since none of the inputs can be -0, the result cannot be -0 either.
+  // However, it can be nan (the sum of two infinities of opposite sign).
+  // On the other hand, if none of the "results" above is nan, then the actual
+  // result cannot be nan either.
+  int nans = 0;
+  for (int i = 0; i < 4; ++i) {
+    if (std::isnan(results[i])) ++nans;
+  }
+  if (nans == 4) return Type::NaN();  // [-inf..-inf] + [inf..inf] or vice versa
+  Type* range =
+      Type::Range(array_min(results, 4), array_max(results, 4), zone());
+  return nans == 0 ? range : Type::Union(range, Type::NaN(), zone());
+  // Examples:
+  //   [-inf, -inf] + [+inf, +inf] = NaN
+  //   [-inf, -inf] + [n, +inf] = [-inf, -inf] \/ NaN
+  //   [-inf, +inf] + [n, +inf] = [-inf, +inf] \/ NaN
+  //   [-inf, m] + [n, +inf] = [-inf, +inf] \/ NaN
+}
+
+Type* OperationTyper::SubtractRanger(RangeType* lhs, RangeType* rhs) {
+  double results[4];
+  results[0] = lhs->Min() - rhs->Min();
+  results[1] = lhs->Min() - rhs->Max();
+  results[2] = lhs->Max() - rhs->Min();
+  results[3] = lhs->Max() - rhs->Max();
+  // Since none of the inputs can be -0, the result cannot be -0.
+  // However, it can be nan (the subtraction of two infinities of same sign).
+  // On the other hand, if none of the "results" above is nan, then the actual
+  // result cannot be nan either.
+  int nans = 0;
+  for (int i = 0; i < 4; ++i) {
+    if (std::isnan(results[i])) ++nans;
+  }
+  if (nans == 4) return Type::NaN();  // [inf..inf] - [inf..inf] (all same sign)
+  Type* range =
+      Type::Range(array_min(results, 4), array_max(results, 4), zone());
+  return nans == 0 ? range : Type::Union(range, Type::NaN(), zone());
+  // Examples:
+  //   [-inf, +inf] - [-inf, +inf] = [-inf, +inf] \/ NaN
+  //   [-inf, -inf] - [-inf, -inf] = NaN
+  //   [-inf, -inf] - [n, +inf] = [-inf, -inf] \/ NaN
+  //   [m, +inf] - [-inf, n] = [-inf, +inf] \/ NaN
+}
+
+Type* OperationTyper::ModulusRanger(RangeType* lhs, RangeType* rhs) {
+  double lmin = lhs->Min();
+  double lmax = lhs->Max();
+  double rmin = rhs->Min();
+  double rmax = rhs->Max();
+
+  double labs = std::max(std::abs(lmin), std::abs(lmax));
+  double rabs = std::max(std::abs(rmin), std::abs(rmax)) - 1;
+  double abs = std::min(labs, rabs);
+  bool maybe_minus_zero = false;
+  double omin = 0;
+  double omax = 0;
+  if (lmin >= 0) {  // {lhs} positive.
+    omin = 0;
+    omax = abs;
+  } else if (lmax <= 0) {  // {lhs} negative.
+    omin = 0 - abs;
+    omax = 0;
+    maybe_minus_zero = true;
+  } else {
+    omin = 0 - abs;
+    omax = abs;
+    maybe_minus_zero = true;
+  }
+
+  Type* result = Type::Range(omin, omax, zone());
+  if (maybe_minus_zero) result = Type::Union(result, Type::MinusZero(), zone());
+  return result;
+}
+
+Type* OperationTyper::MultiplyRanger(Type* lhs, Type* rhs) {
+  double results[4];
+  double lmin = lhs->AsRange()->Min();
+  double lmax = lhs->AsRange()->Max();
+  double rmin = rhs->AsRange()->Min();
+  double rmax = rhs->AsRange()->Max();
+  results[0] = lmin * rmin;
+  results[1] = lmin * rmax;
+  results[2] = lmax * rmin;
+  results[3] = lmax * rmax;
+  // If the result may be nan, we give up on calculating a precise type,
+  // because
+  // the discontinuity makes it too complicated.  Note that even if none of
+  // the
+  // "results" above is nan, the actual result may still be, so we have to do
+  // a
+  // different check:
+  bool maybe_nan = (lhs->Maybe(cache_.kSingletonZero) &&
+                    (rmin == -V8_INFINITY || rmax == +V8_INFINITY)) ||
+                   (rhs->Maybe(cache_.kSingletonZero) &&
+                    (lmin == -V8_INFINITY || lmax == +V8_INFINITY));
+  if (maybe_nan) return cache_.kIntegerOrMinusZeroOrNaN;  // Giving up.
+  bool maybe_minuszero = (lhs->Maybe(cache_.kSingletonZero) && rmin < 0) ||
+                         (rhs->Maybe(cache_.kSingletonZero) && lmin < 0);
+  Type* range =
+      Type::Range(array_min(results, 4), array_max(results, 4), zone());
+  return maybe_minuszero ? Type::Union(range, Type::MinusZero(), zone())
+                         : range;
+}
+
+Type* OperationTyper::ToNumber(Type* type) {
+  if (type->Is(Type::Number())) return type;
+  if (type->Is(Type::NullOrUndefined())) {
+    if (type->Is(Type::Null())) return cache_.kSingletonZero;
+    if (type->Is(Type::Undefined())) return Type::NaN();
+    return Type::Union(Type::NaN(), cache_.kSingletonZero, zone());
+  }
+  if (type->Is(Type::NumberOrUndefined())) {
+    return Type::Union(Type::Intersect(type, Type::Number(), zone()),
+                       Type::NaN(), zone());
+  }
+  if (type->Is(singleton_false_)) return cache_.kSingletonZero;
+  if (type->Is(singleton_true_)) return cache_.kSingletonOne;
+  if (type->Is(Type::Boolean())) return cache_.kZeroOrOne;
+  if (type->Is(Type::BooleanOrNumber())) {
+    return Type::Union(Type::Intersect(type, Type::Number(), zone()),
+                       cache_.kZeroOrOne, zone());
+  }
+  return Type::Number();
+}
+
+Type* OperationTyper::NumericAdd(Type* lhs, Type* rhs) {
+  DCHECK(lhs->Is(Type::Number()));
+  DCHECK(rhs->Is(Type::Number()));
+
+  // We can give more precise types for integers.
+  if (!lhs->Is(cache_.kIntegerOrMinusZeroOrNaN) ||
+      !rhs->Is(cache_.kIntegerOrMinusZeroOrNaN)) {
+    return Type::Number();
+  }
+  Type* int_lhs = Type::Intersect(lhs, cache_.kInteger, zone());
+  Type* int_rhs = Type::Intersect(rhs, cache_.kInteger, zone());
+  Type* result =
+      AddRanger(int_lhs->Min(), int_lhs->Max(), int_rhs->Min(), int_rhs->Max());
+  if (lhs->Maybe(Type::NaN()) || rhs->Maybe(Type::NaN())) {
+    result = Type::Union(result, Type::NaN(), zone());
+  }
+  if (lhs->Maybe(Type::MinusZero()) && rhs->Maybe(Type::MinusZero())) {
+    result = Type::Union(result, Type::MinusZero(), zone());
+  }
+  return result;
+}
+
+Type* OperationTyper::NumericSubtract(Type* lhs, Type* rhs) {
+  DCHECK(lhs->Is(Type::Number()));
+  DCHECK(rhs->Is(Type::Number()));
+
+  lhs = Rangify(lhs);
+  rhs = Rangify(rhs);
+  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
+  if (lhs->IsRange() && rhs->IsRange()) {
+    return SubtractRanger(lhs->AsRange(), rhs->AsRange());
+  }
+  // TODO(neis): Deal with numeric bitsets here and elsewhere.
+  return Type::Number();
+}
+
+Type* OperationTyper::NumericMultiply(Type* lhs, Type* rhs) {
+  DCHECK(lhs->Is(Type::Number()));
+  DCHECK(rhs->Is(Type::Number()));
+  lhs = Rangify(lhs);
+  rhs = Rangify(rhs);
+  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
+  if (lhs->IsRange() && rhs->IsRange()) {
+    return MultiplyRanger(lhs, rhs);
+  }
+  return Type::Number();
+}
+
+Type* OperationTyper::NumericDivide(Type* lhs, Type* rhs) {
+  DCHECK(lhs->Is(Type::Number()));
+  DCHECK(rhs->Is(Type::Number()));
+
+  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
+  // Division is tricky, so all we do is try ruling out nan.
+  bool maybe_nan =
+      lhs->Maybe(Type::NaN()) || rhs->Maybe(cache_.kZeroish) ||
+      ((lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) &&
+       (rhs->Min() == -V8_INFINITY || rhs->Max() == +V8_INFINITY));
+  return maybe_nan ? Type::Number() : Type::OrderedNumber();
+}
+
+Type* OperationTyper::NumericModulus(Type* lhs, Type* rhs) {
+  DCHECK(lhs->Is(Type::Number()));
+  DCHECK(rhs->Is(Type::Number()));
+  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
+
+  if (lhs->Maybe(Type::NaN()) || rhs->Maybe(cache_.kZeroish) ||
+      lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) {
+    // Result maybe NaN.
+    return Type::Number();
+  }
+
+  lhs = Rangify(lhs);
+  rhs = Rangify(rhs);
+  if (lhs->IsRange() && rhs->IsRange()) {
+    return ModulusRanger(lhs->AsRange(), rhs->AsRange());
+  }
+  return Type::OrderedNumber();
+}
+
+Type* OperationTyper::ToPrimitive(Type* type) {
+  if (type->Is(Type::Primitive()) && !type->Maybe(Type::Receiver())) {
+    return type;
+  }
+  return Type::Primitive();
+}
+
+Type* OperationTyper::Invert(Type* type) {
+  DCHECK(type->Is(Type::Boolean()));
+  DCHECK(type->IsInhabited());
+  if (type->Is(singleton_false())) return singleton_true();
+  if (type->Is(singleton_true())) return singleton_false();
+  return type;
+}
+
+OperationTyper::ComparisonOutcome OperationTyper::Invert(
+    ComparisonOutcome outcome) {
+  ComparisonOutcome result(0);
+  if ((outcome & kComparisonUndefined) != 0) result |= kComparisonUndefined;
+  if ((outcome & kComparisonTrue) != 0) result |= kComparisonFalse;
+  if ((outcome & kComparisonFalse) != 0) result |= kComparisonTrue;
+  return result;
+}
+
+Type* OperationTyper::FalsifyUndefined(ComparisonOutcome outcome) {
+  if ((outcome & kComparisonFalse) != 0 ||
+      (outcome & kComparisonUndefined) != 0) {
+    return (outcome & kComparisonTrue) != 0 ? Type::Boolean()
+                                            : singleton_false();
+  }
+  // Type should be non empty, so we know it should be true.
+  DCHECK((outcome & kComparisonTrue) != 0);
+  return singleton_true();
+}
+
+Type* OperationTyper::TypeJSAdd(Type* lhs, Type* rhs) {
+  lhs = ToPrimitive(lhs);
+  rhs = ToPrimitive(rhs);
+  if (lhs->Maybe(Type::String()) || rhs->Maybe(Type::String())) {
+    if (lhs->Is(Type::String()) || rhs->Is(Type::String())) {
+      return Type::String();
+    } else {
+      return Type::NumberOrString();
+    }
+  }
+  lhs = ToNumber(lhs);
+  rhs = ToNumber(rhs);
+  return NumericAdd(lhs, rhs);
+}
+
+Type* OperationTyper::TypeJSSubtract(Type* lhs, Type* rhs) {
+  return NumericSubtract(ToNumber(lhs), ToNumber(rhs));
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/operation-typer.h b/src/compiler/operation-typer.h
new file mode 100644
index 0000000..aa669ac
--- /dev/null
+++ b/src/compiler/operation-typer.h
@@ -0,0 +1,84 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OPERATION_TYPER_H_
+#define V8_COMPILER_OPERATION_TYPER_H_
+
+#include "src/base/flags.h"
+#include "src/compiler/opcodes.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class RangeType;
+class Type;
+class TypeCache;
+class Zone;
+
+namespace compiler {
+
+class OperationTyper {
+ public:
+  OperationTyper(Isolate* isolate, Zone* zone);
+
+  // Typing Phi.
+  Type* Merge(Type* left, Type* right);
+
+  Type* ToPrimitive(Type* type);
+
+  // Helpers for number operation typing.
+  Type* ToNumber(Type* type);
+  Type* WeakenRange(Type* current_range, Type* previous_range);
+
+  Type* NumericAdd(Type* lhs, Type* rhs);
+  Type* NumericSubtract(Type* lhs, Type* rhs);
+  Type* NumericMultiply(Type* lhs, Type* rhs);
+  Type* NumericDivide(Type* lhs, Type* rhs);
+  Type* NumericModulus(Type* lhs, Type* rhs);
+
+  enum ComparisonOutcomeFlags {
+    kComparisonTrue = 1,
+    kComparisonFalse = 2,
+    kComparisonUndefined = 4
+  };
+
+// Javascript binop typers.
+#define DECLARE_CASE(x) Type* Type##x(Type* lhs, Type* rhs);
+  JS_SIMPLE_BINOP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
+  Type* singleton_false() { return singleton_false_; }
+  Type* singleton_true() { return singleton_true_; }
+  Type* singleton_the_hole() { return singleton_the_hole_; }
+
+ private:
+  typedef base::Flags<ComparisonOutcomeFlags> ComparisonOutcome;
+
+  ComparisonOutcome Invert(ComparisonOutcome);
+  Type* Invert(Type*);
+  Type* FalsifyUndefined(ComparisonOutcome);
+
+  Type* Rangify(Type*);
+  Type* AddRanger(double lhs_min, double lhs_max, double rhs_min,
+                  double rhs_max);
+  Type* SubtractRanger(RangeType* lhs, RangeType* rhs);
+  Type* MultiplyRanger(Type* lhs, Type* rhs);
+  Type* ModulusRanger(RangeType* lhs, RangeType* rhs);
+
+  Zone* zone() { return zone_; }
+
+  Zone* zone_;
+  TypeCache const& cache_;
+
+  Type* singleton_false_;
+  Type* singleton_true_;
+  Type* singleton_the_hole_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_OPERATION_TYPER_H_
diff --git a/src/compiler/operator-properties.cc b/src/compiler/operator-properties.cc
index 7f38ca7..43b0076 100644
--- a/src/compiler/operator-properties.cc
+++ b/src/compiler/operator-properties.cc
@@ -22,11 +22,12 @@
 // static
 int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
   switch (op->opcode()) {
+    case IrOpcode::kCheckpoint:
     case IrOpcode::kFrameState:
       return 1;
     case IrOpcode::kJSCallRuntime: {
       const CallRuntimeParameters& p = CallRuntimeParametersOf(op);
-      return Linkage::FrameStateInputCount(p.id());
+      return Linkage::NeedsFrameStateInput(p.id()) ? 1 : 0;
     }
 
     // Strict equality cannot lazily deoptimize.
@@ -34,12 +35,6 @@
     case IrOpcode::kJSStrictNotEqual:
       return 0;
 
-    // We record the frame state immediately before and immediately after every
-    // construct/function call.
-    case IrOpcode::kJSCallConstruct:
-    case IrOpcode::kJSCallFunction:
-      return 2;
-
     // Compare operations
     case IrOpcode::kJSEqual:
     case IrOpcode::kJSNotEqual:
@@ -54,6 +49,15 @@
     case IrOpcode::kJSCreateLiteralObject:
     case IrOpcode::kJSCreateLiteralRegExp:
 
+    // Property access operations
+    case IrOpcode::kJSLoadNamed:
+    case IrOpcode::kJSStoreNamed:
+    case IrOpcode::kJSLoadProperty:
+    case IrOpcode::kJSStoreProperty:
+    case IrOpcode::kJSLoadGlobal:
+    case IrOpcode::kJSStoreGlobal:
+    case IrOpcode::kJSDeleteProperty:
+
     // Context operations
     case IrOpcode::kJSCreateScriptContext:
 
@@ -65,24 +69,17 @@
     case IrOpcode::kJSToObject:
     case IrOpcode::kJSToString:
 
+    // Call operations
+    case IrOpcode::kJSCallConstruct:
+    case IrOpcode::kJSCallFunction:
+
     // Misc operations
     case IrOpcode::kJSConvertReceiver:
     case IrOpcode::kJSForInNext:
     case IrOpcode::kJSForInPrepare:
     case IrOpcode::kJSStackCheck:
-    case IrOpcode::kJSDeleteProperty:
       return 1;
 
-    // We record the frame state immediately before and immediately after
-    // every property or global variable access.
-    case IrOpcode::kJSLoadNamed:
-    case IrOpcode::kJSStoreNamed:
-    case IrOpcode::kJSLoadProperty:
-    case IrOpcode::kJSStoreProperty:
-    case IrOpcode::kJSLoadGlobal:
-    case IrOpcode::kJSStoreGlobal:
-      return 2;
-
     // Binary operators that can deopt in the middle the operation (e.g.,
     // as a result of lazy deopt in ToNumber conversion) need a second frame
     // state so that we can resume before the operation.
diff --git a/src/compiler/operator.h b/src/compiler/operator.h
index fa85d59..8f288cb 100644
--- a/src/compiler/operator.h
+++ b/src/compiler/operator.h
@@ -36,18 +36,18 @@
   // transformations for nodes that have this operator.
   enum Property {
     kNoProperties = 0,
-    kReducible = 1 << 0,    // Participates in strength reduction.
-    kCommutative = 1 << 1,  // OP(a, b) == OP(b, a) for all inputs.
-    kAssociative = 1 << 2,  // OP(a, OP(b,c)) == OP(OP(a,b), c) for all inputs.
-    kIdempotent = 1 << 3,   // OP(a); OP(a) == OP(a).
-    kNoRead = 1 << 4,       // Has no scheduling dependency on Effects
-    kNoWrite = 1 << 5,      // Does not modify any Effects and thereby
+    kCommutative = 1 << 0,  // OP(a, b) == OP(b, a) for all inputs.
+    kAssociative = 1 << 1,  // OP(a, OP(b,c)) == OP(OP(a,b), c) for all inputs.
+    kIdempotent = 1 << 2,   // OP(a); OP(a) == OP(a).
+    kNoRead = 1 << 3,       // Has no scheduling dependency on Effects
+    kNoWrite = 1 << 4,      // Does not modify any Effects and thereby
                             // create new scheduling dependencies.
-    kNoThrow = 1 << 6,      // Can never generate an exception.
+    kNoThrow = 1 << 5,      // Can never generate an exception.
+    kNoDeopt = 1 << 6,      // Can never generate an eager deoptimization exit.
     kFoldable = kNoRead | kNoWrite,
-    kKontrol = kFoldable | kNoThrow,
-    kEliminatable = kNoWrite | kNoThrow,
-    kPure = kNoRead | kNoWrite | kNoThrow | kIdempotent
+    kKontrol = kNoDeopt | kFoldable | kNoThrow,
+    kEliminatable = kNoDeopt | kNoWrite | kNoThrow,
+    kPure = kNoDeopt | kNoRead | kNoWrite | kNoThrow | kIdempotent
   };
   typedef base::Flags<Property, uint8_t> Properties;
 
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 82583e9..d592000 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -14,6 +14,7 @@
 #include "src/compiler/basic-block-instrumentor.h"
 #include "src/compiler/branch-elimination.h"
 #include "src/compiler/bytecode-graph-builder.h"
+#include "src/compiler/checkpoint-elimination.h"
 #include "src/compiler/code-generator.h"
 #include "src/compiler/common-operator-reducer.h"
 #include "src/compiler/control-flow-optimizer.h"
@@ -25,7 +26,6 @@
 #include "src/compiler/graph-replay.h"
 #include "src/compiler/graph-trimmer.h"
 #include "src/compiler/graph-visualizer.h"
-#include "src/compiler/greedy-allocator.h"
 #include "src/compiler/instruction-selector.h"
 #include "src/compiler/instruction.h"
 #include "src/compiler/js-builtin-reducer.h"
@@ -49,6 +49,7 @@
 #include "src/compiler/move-optimizer.h"
 #include "src/compiler/osr.h"
 #include "src/compiler/pipeline-statistics.h"
+#include "src/compiler/redundancy-elimination.h"
 #include "src/compiler/register-allocator-verifier.h"
 #include "src/compiler/register-allocator.h"
 #include "src/compiler/schedule.h"
@@ -57,6 +58,7 @@
 #include "src/compiler/simplified-lowering.h"
 #include "src/compiler/simplified-operator-reducer.h"
 #include "src/compiler/simplified-operator.h"
+#include "src/compiler/store-store-elimination.h"
 #include "src/compiler/tail-call-optimization.h"
 #include "src/compiler/type-hint-analyzer.h"
 #include "src/compiler/typer.h"
@@ -521,7 +523,7 @@
                                              ZonePool* zone_pool) {
   PipelineStatistics* pipeline_statistics = nullptr;
 
-  if (FLAG_turbo_stats) {
+  if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
     pipeline_statistics = new PipelineStatistics(info, zone_pool);
     pipeline_statistics->BeginPhaseKind("initializing");
   }
@@ -533,7 +535,9 @@
     int pos = info->shared_info()->start_position();
     json_of << "{\"function\":\"" << function_name.get()
             << "\", \"sourcePosition\":" << pos << ", \"source\":\"";
-    if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+    Isolate* isolate = info->isolate();
+    if (!script->IsUndefined(isolate) &&
+        !script->source()->IsUndefined(isolate)) {
       DisallowHeapAllocation no_allocation;
       int start = info->shared_info()->start_position();
       int len = info->shared_info()->end_position() - start;
@@ -597,6 +601,9 @@
     info()->MarkAsDeoptimizationEnabled();
   }
   if (!info()->is_optimizing_from_bytecode()) {
+    if (info()->is_deoptimization_enabled() && FLAG_turbo_type_feedback) {
+      info()->MarkAsTypeFeedbackEnabled();
+    }
     if (!Compiler::EnsureDeoptimizationSupport(info())) return FAILED;
   }
 
@@ -718,7 +725,7 @@
   static const char* phase_name() { return "type hint analysis"; }
 
   void Run(PipelineData* data, Zone* temp_zone) {
-    if (!data->info()->is_optimizing_from_bytecode()) {
+    if (data->info()->is_type_feedback_enabled()) {
       TypeHintAnalyzer analyzer(data->graph_zone());
       Handle<Code> code(data->info()->shared_info()->code(), data->isolate());
       TypeHintAnalysis* type_hint_analysis = analyzer.Analyze(code);
@@ -804,7 +811,9 @@
     AddReducer(data, &graph_reducer, &native_context_specialization);
     AddReducer(data, &graph_reducer, &context_specialization);
     AddReducer(data, &graph_reducer, &call_reducer);
-    AddReducer(data, &graph_reducer, &inlining);
+    if (!data->info()->is_optimizing_from_bytecode()) {
+      AddReducer(data, &graph_reducer, &inlining);
+    }
     graph_reducer.ReduceGraph();
   }
 };
@@ -880,6 +889,9 @@
     if (data->info()->shared_info()->HasBytecodeArray()) {
       typed_lowering_flags |= JSTypedLowering::kDisableBinaryOpReduction;
     }
+    if (data->info()->is_type_feedback_enabled()) {
+      typed_lowering_flags |= JSTypedLowering::kTypeFeedbackEnabled;
+    }
     JSTypedLowering typed_lowering(&graph_reducer, data->info()->dependencies(),
                                    typed_lowering_flags, data->jsgraph(),
                                    temp_zone);
@@ -888,7 +900,8 @@
         data->info()->is_deoptimization_enabled()
             ? JSIntrinsicLowering::kDeoptimizationEnabled
             : JSIntrinsicLowering::kDeoptimizationDisabled);
-    SimplifiedOperatorReducer simple_reducer(data->jsgraph());
+    SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph());
+    CheckpointElimination checkpoint_elimination(&graph_reducer);
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                          data->common(), data->machine());
     AddReducer(data, &graph_reducer, &dead_code_elimination);
@@ -900,6 +913,7 @@
     AddReducer(data, &graph_reducer, &intrinsic_lowering);
     AddReducer(data, &graph_reducer, &load_elimination);
     AddReducer(data, &graph_reducer, &simple_reducer);
+    AddReducer(data, &graph_reducer, &checkpoint_elimination);
     AddReducer(data, &graph_reducer, &common_reducer);
     graph_reducer.ReduceGraph();
   }
@@ -942,8 +956,12 @@
   static const char* phase_name() { return "representation selection"; }
 
   void Run(PipelineData* data, Zone* temp_zone) {
+    SimplifiedLowering::Flags flags =
+        data->info()->is_type_feedback_enabled()
+            ? SimplifiedLowering::kTypeFeedbackEnabled
+            : SimplifiedLowering::kNoFlag;
     SimplifiedLowering lowering(data->jsgraph(), temp_zone,
-                                data->source_positions());
+                                data->source_positions(), flags);
     lowering.LowerAllNodes();
   }
 };
@@ -956,13 +974,15 @@
     JSGenericLowering generic_lowering(data->jsgraph());
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                               data->common());
-    SimplifiedOperatorReducer simple_reducer(data->jsgraph());
+    SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph());
+    RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
     ValueNumberingReducer value_numbering(temp_zone);
     MachineOperatorReducer machine_reducer(data->jsgraph());
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                          data->common(), data->machine());
     AddReducer(data, &graph_reducer, &dead_code_elimination);
     AddReducer(data, &graph_reducer, &simple_reducer);
+    AddReducer(data, &graph_reducer, &redundancy_elimination);
     AddReducer(data, &graph_reducer, &generic_lowering);
     AddReducer(data, &graph_reducer, &value_numbering);
     AddReducer(data, &graph_reducer, &machine_reducer);
@@ -1012,10 +1032,26 @@
   }
 };
 
+struct StoreStoreEliminationPhase {
+  static const char* phase_name() { return "Store-store elimination"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    StoreStoreElimination store_store_elimination(data->jsgraph(), temp_zone);
+    store_store_elimination.Run();
+  }
+};
+
 struct MemoryOptimizationPhase {
   static const char* phase_name() { return "memory optimization"; }
 
   void Run(PipelineData* data, Zone* temp_zone) {
+    // The memory optimizer requires the graphs to be trimmed, so trim now.
+    GraphTrimmer trimmer(temp_zone, data->graph());
+    NodeVector roots(temp_zone);
+    data->jsgraph()->GetCachedNodes(&roots);
+    trimmer.TrimGraph(roots.begin(), roots.end());
+
+    // Optimize allocations and load/store operations.
     MemoryOptimizer optimizer(data->jsgraph(), temp_zone);
     optimizer.Optimize();
   }
@@ -1411,11 +1447,7 @@
 
     // Select representations.
     Run<RepresentationSelectionPhase>();
-    RunPrintAndVerify("Representations selected");
-
-    // Run early optimization pass.
-    Run<EarlyOptimizationPhase>();
-    RunPrintAndVerify("Early optimized");
+    RunPrintAndVerify("Representations selected", true);
   }
 
 #ifdef DEBUG
@@ -1435,6 +1467,10 @@
   RunPrintAndVerify("Untyped", true);
 #endif
 
+  // Run early optimization pass.
+  Run<EarlyOptimizationPhase>();
+  RunPrintAndVerify("Early optimized", true);
+
   data->EndPhaseKind();
 
   return true;
@@ -1448,6 +1484,11 @@
   Run<EffectControlLinearizationPhase>();
   RunPrintAndVerify("Effect and control linearized", true);
 
+  if (FLAG_turbo_store_elimination) {
+    Run<StoreStoreEliminationPhase>();
+    RunPrintAndVerify("Store-store elimination", true);
+  }
+
   Run<BranchEliminationPhase>();
   RunPrintAndVerify("Branch conditions eliminated", true);
 
@@ -1487,7 +1528,7 @@
   ZonePool zone_pool(isolate->allocator());
   PipelineData data(&zone_pool, &info, graph, schedule);
   base::SmartPointer<PipelineStatistics> pipeline_statistics;
-  if (FLAG_turbo_stats) {
+  if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
     pipeline_statistics.Reset(new PipelineStatistics(&info, &zone_pool));
     pipeline_statistics->BeginPhaseKind("stub codegen");
   }
@@ -1496,9 +1537,11 @@
   DCHECK_NOT_NULL(data.schedule());
 
   if (FLAG_trace_turbo) {
-    TurboJsonFile json_of(&info, std::ios_base::trunc);
-    json_of << "{\"function\":\"" << info.GetDebugName().get()
-            << "\", \"source\":\"\",\n\"phases\":[";
+    {
+      TurboJsonFile json_of(&info, std::ios_base::trunc);
+      json_of << "{\"function\":\"" << info.GetDebugName().get()
+              << "\", \"source\":\"\",\n\"phases\":[";
+    }
     pipeline.Run<PrintGraphPhase>("Machine");
   }
 
@@ -1539,7 +1582,7 @@
   ZonePool zone_pool(info->isolate()->allocator());
   PipelineData data(&zone_pool, info, graph, schedule);
   base::SmartPointer<PipelineStatistics> pipeline_statistics;
-  if (FLAG_turbo_stats) {
+  if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
     pipeline_statistics.Reset(new PipelineStatistics(info, &zone_pool));
     pipeline_statistics->BeginPhaseKind("test codegen");
   }
@@ -1624,9 +1667,8 @@
   bool run_verifier = FLAG_turbo_verify_allocation;
 
   // Allocate registers.
-  AllocateRegisters(
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
-      call_descriptor, run_verifier);
+  AllocateRegisters(RegisterConfiguration::Turbofan(), call_descriptor,
+                    run_verifier);
   Run<FrameElisionPhase>();
   if (data->compilation_failed()) {
     info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
@@ -1747,13 +1789,8 @@
     Run<SplinterLiveRangesPhase>();
   }
 
-  if (FLAG_turbo_greedy_regalloc) {
-    Run<AllocateGeneralRegistersPhase<GreedyAllocator>>();
-    Run<AllocateFPRegistersPhase<GreedyAllocator>>();
-  } else {
-    Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
-    Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
-  }
+  Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
+  Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
 
   if (FLAG_turbo_preprocess_ranges) {
     Run<MergeSplintersPhase>();
diff --git a/src/compiler/ppc/OWNERS b/src/compiler/ppc/OWNERS
index eb007cb..752e8e3 100644
--- a/src/compiler/ppc/OWNERS
+++ b/src/compiler/ppc/OWNERS
@@ -3,3 +3,4 @@
 joransiu@ca.ibm.com
 mbrandy@us.ibm.com
 michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/src/compiler/ppc/code-generator-ppc.cc b/src/compiler/ppc/code-generator-ppc.cc
index 8a0c585..4909414 100644
--- a/src/compiler/ppc/code-generator-ppc.cc
+++ b/src/compiler/ppc/code-generator-ppc.cc
@@ -216,7 +216,12 @@
       DCHECK_EQ(0, offset_immediate_);
       __ add(scratch1_, object_, offset_);
     }
-    __ CallStub(&stub);
+    if (must_save_lr_ && FLAG_enable_embedded_constant_pool) {
+      ConstantPoolUnavailableScope constant_pool_unavailable(masm());
+      __ CallStub(&stub);
+    } else {
+      __ CallStub(&stub);
+    }
     if (must_save_lr_) {
       // We need to save and restore lr if the frame was elided.
       __ Pop(scratch1_);
@@ -436,6 +441,34 @@
     DCHECK_EQ(LeaveRC, i.OutputRCBit());                                      \
   } while (0)
 
+#define ASSEMBLE_IEEE754_UNOP(name)                                            \
+  do {                                                                         \
+    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
+    /* and generate a CallAddress instruction instead. */                      \
+    FrameScope scope(masm(), StackFrame::MANUAL);                              \
+    __ PrepareCallCFunction(0, 1, kScratchReg);                                \
+    __ MovToFloatParameter(i.InputDoubleRegister(0));                          \
+    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
+                     0, 1);                                                    \
+    /* Move the result into the double result register. */                     \
+    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());                                       \
+  } while (0)
+
+#define ASSEMBLE_IEEE754_BINOP(name)                                           \
+  do {                                                                         \
+    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
+    /* and generate a CallAddress instruction instead. */                      \
+    FrameScope scope(masm(), StackFrame::MANUAL);                              \
+    __ PrepareCallCFunction(0, 2, kScratchReg);                                \
+    __ MovToFloatParameters(i.InputDoubleRegister(0),                          \
+                            i.InputDoubleRegister(1));                         \
+    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
+                     0, 2);                                                    \
+    /* Move the result into the double result register. */                     \
+    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());                                       \
+  } while (0)
 
 #define ASSEMBLE_FLOAT_MAX(scratch_reg)                                       \
   do {                                                                        \
@@ -874,6 +907,9 @@
       AssembleArchTableSwitch(instr);
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
+    case kArchDebugBreak:
+      __ stop("kArchDebugBreak");
+      break;
     case kArchNop:
     case kArchThrowTerminator:
       // don't emit code for nops.
@@ -1226,6 +1262,45 @@
       // and generate a CallAddress instruction instead.
       ASSEMBLE_FLOAT_MODULO();
       break;
+    case kIeee754Float64Atan:
+      ASSEMBLE_IEEE754_UNOP(atan);
+      break;
+    case kIeee754Float64Atan2:
+      ASSEMBLE_IEEE754_BINOP(atan2);
+      break;
+    case kIeee754Float64Tan:
+      ASSEMBLE_IEEE754_UNOP(tan);
+      break;
+    case kIeee754Float64Cbrt:
+      ASSEMBLE_IEEE754_UNOP(cbrt);
+      break;
+    case kIeee754Float64Sin:
+      ASSEMBLE_IEEE754_UNOP(sin);
+      break;
+    case kIeee754Float64Cos:
+      ASSEMBLE_IEEE754_UNOP(cos);
+      break;
+    case kIeee754Float64Exp:
+      ASSEMBLE_IEEE754_UNOP(exp);
+      break;
+    case kIeee754Float64Expm1:
+      ASSEMBLE_IEEE754_UNOP(expm1);
+      break;
+    case kIeee754Float64Atanh:
+      ASSEMBLE_IEEE754_UNOP(atanh);
+      break;
+    case kIeee754Float64Log:
+      ASSEMBLE_IEEE754_UNOP(log);
+      break;
+    case kIeee754Float64Log1p:
+      ASSEMBLE_IEEE754_UNOP(log1p);
+      break;
+    case kIeee754Float64Log2:
+      ASSEMBLE_IEEE754_UNOP(log2);
+      break;
+    case kIeee754Float64Log10:
+      ASSEMBLE_IEEE754_UNOP(log10);
+      break;
     case kPPC_Neg:
       __ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
       break;
@@ -1308,6 +1383,12 @@
       DCHECK_EQ(SetRC, i.OutputRCBit());
       break;
 #endif
+    case kPPC_Float64SilenceNaN: {
+      DoubleRegister value = i.InputDoubleRegister(0);
+      DoubleRegister result = i.OutputDoubleRegister();
+      __ CanonicalizeNaN(result, value);
+      break;
+    }
     case kPPC_Push:
       if (instr->InputAt(0)->IsFPRegister()) {
         __ stfdu(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
@@ -1321,8 +1402,15 @@
     case kPPC_PushFrame: {
       int num_slots = i.InputInt32(1);
       if (instr->InputAt(0)->IsFPRegister()) {
-        __ StoreDoubleU(i.InputDoubleRegister(0),
+        LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+        if (op->representation() == MachineRepresentation::kFloat64) {
+          __ StoreDoubleU(i.InputDoubleRegister(0),
                         MemOperand(sp, -num_slots * kPointerSize), r0);
+        } else {
+          DCHECK(op->representation() == MachineRepresentation::kFloat32);
+          __ StoreSingleU(i.InputDoubleRegister(0),
+                          MemOperand(sp, -num_slots * kPointerSize), r0);
+        }
       } else {
         __ StorePU(i.InputRegister(0),
                    MemOperand(sp, -num_slots * kPointerSize), r0);
@@ -1332,8 +1420,15 @@
     case kPPC_StoreToStackSlot: {
       int slot = i.InputInt32(1);
       if (instr->InputAt(0)->IsFPRegister()) {
-        __ StoreDouble(i.InputDoubleRegister(0),
-                       MemOperand(sp, slot * kPointerSize), r0);
+        LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+        if (op->representation() == MachineRepresentation::kFloat64) {
+          __ StoreDouble(i.InputDoubleRegister(0),
+                         MemOperand(sp, slot * kPointerSize), r0);
+        } else {
+          DCHECK(op->representation() == MachineRepresentation::kFloat32);
+          __ StoreSingle(i.InputDoubleRegister(0),
+                         MemOperand(sp, slot * kPointerSize), r0);
+        }
       } else {
         __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize), r0);
       }
@@ -1929,6 +2024,7 @@
           if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
 #else
           if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
               src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
 #endif
             __ mov(dst, Operand(src.ToInt32(), src.rmode()));
@@ -1938,7 +2034,8 @@
           break;
         case Constant::kInt64:
 #if V8_TARGET_ARCH_PPC64
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
             __ mov(dst, Operand(src.ToInt64(), src.rmode()));
           } else {
             DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
@@ -1997,17 +2094,33 @@
       __ Move(dst, src);
     } else {
       DCHECK(destination->IsFPStackSlot());
-      __ StoreDouble(src, g.ToMemOperand(destination), r0);
+      LocationOperand* op = LocationOperand::cast(source);
+      if (op->representation() == MachineRepresentation::kFloat64) {
+        __ StoreDouble(src, g.ToMemOperand(destination), r0);
+      } else {
+        __ StoreSingle(src, g.ToMemOperand(destination), r0);
+      }
     }
   } else if (source->IsFPStackSlot()) {
     DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
     MemOperand src = g.ToMemOperand(source);
     if (destination->IsFPRegister()) {
-      __ LoadDouble(g.ToDoubleRegister(destination), src, r0);
+      LocationOperand* op = LocationOperand::cast(source);
+      if (op->representation() == MachineRepresentation::kFloat64) {
+        __ LoadDouble(g.ToDoubleRegister(destination), src, r0);
+      } else {
+        __ LoadSingle(g.ToDoubleRegister(destination), src, r0);
+      }
     } else {
+      LocationOperand* op = LocationOperand::cast(source);
       DoubleRegister temp = kScratchDoubleReg;
-      __ LoadDouble(temp, src, r0);
-      __ StoreDouble(temp, g.ToMemOperand(destination), r0);
+      if (op->representation() == MachineRepresentation::kFloat64) {
+        __ LoadDouble(temp, src, r0);
+        __ StoreDouble(temp, g.ToMemOperand(destination), r0);
+      } else {
+        __ LoadSingle(temp, src, r0);
+        __ StoreSingle(temp, g.ToMemOperand(destination), r0);
+      }
     }
   } else {
     UNREACHABLE();
diff --git a/src/compiler/ppc/instruction-codes-ppc.h b/src/compiler/ppc/instruction-codes-ppc.h
index 23cd235..d697da3 100644
--- a/src/compiler/ppc/instruction-codes-ppc.h
+++ b/src/compiler/ppc/instruction-codes-ppc.h
@@ -93,6 +93,7 @@
   V(PPC_Uint32ToFloat32)           \
   V(PPC_Uint32ToDouble)            \
   V(PPC_Float32ToDouble)           \
+  V(PPC_Float64SilenceNaN)         \
   V(PPC_DoubleToInt32)             \
   V(PPC_DoubleToUint32)            \
   V(PPC_DoubleToInt64)             \
diff --git a/src/compiler/ppc/instruction-scheduler-ppc.cc b/src/compiler/ppc/instruction-scheduler-ppc.cc
index 1259a87..f41900d 100644
--- a/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -92,6 +92,7 @@
     case kPPC_Uint32ToFloat32:
     case kPPC_Uint32ToDouble:
     case kPPC_Float32ToDouble:
+    case kPPC_Float64SilenceNaN:
     case kPPC_DoubleToInt32:
     case kPPC_DoubleToUint32:
     case kPPC_DoubleToInt64:
diff --git a/src/compiler/ppc/instruction-selector-ppc.cc b/src/compiler/ppc/instruction-selector-ppc.cc
index b8ca3ba..b724001 100644
--- a/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/src/compiler/ppc/instruction-selector-ppc.cc
@@ -1294,6 +1294,10 @@
 
 void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
 
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+  VisitRR(this, kPPC_Float64SilenceNaN, node);
+}
+
 
 void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
 
@@ -1310,11 +1314,24 @@
   VisitRR(this, kPPC_AbsDouble, node);
 }
 
-
 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
   VisitRR(this, kPPC_SqrtDouble | MiscField::encode(1), node);
 }
 
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+                                                  InstructionCode opcode) {
+  PPCOperandGenerator g(this);
+  Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1))
+      ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+                                                   InstructionCode opcode) {
+  PPCOperandGenerator g(this);
+  Emit(opcode, g.DefineAsFixed(node, d1),
+       g.UseFixed(node->InputAt(0), d1),
+       g.UseFixed(node->InputAt(1), d2))->MarkAsCall();
+}
 
 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
   VisitRR(this, kPPC_SqrtDouble, node);
@@ -1365,6 +1382,9 @@
   UNREACHABLE();
 }
 
+void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
@@ -1991,6 +2011,13 @@
   // We omit kWord32ShiftIsSafe as s[rl]w use 0x3f as a mask rather than 0x1f.
 }
 
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+  return MachineOperatorBuilder::AlignmentRequirements::
+      FullUnalignedAccessSupport();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
index 9407da6..ef23bc4 100644
--- a/src/compiler/raw-machine-assembler.cc
+++ b/src/compiler/raw-machine-assembler.cc
@@ -135,6 +135,11 @@
   current_block_ = nullptr;
 }
 
+void RawMachineAssembler::DebugBreak() { AddNode(machine()->DebugBreak()); }
+
+void RawMachineAssembler::Comment(const char* msg) {
+  AddNode(machine()->Comment(msg));
+}
 
 Node* RawMachineAssembler::CallN(CallDescriptor* desc, Node* function,
                                  Node** args) {
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
index 69ddd50..387e961 100644
--- a/src/compiler/raw-machine-assembler.h
+++ b/src/compiler/raw-machine-assembler.h
@@ -460,7 +460,22 @@
   }
   Node* Float64Abs(Node* a) { return AddNode(machine()->Float64Abs(), a); }
   Node* Float64Neg(Node* a) { return Float64Sub(Float64Constant(-0.0), a); }
+  Node* Float64Atan(Node* a) { return AddNode(machine()->Float64Atan(), a); }
+  Node* Float64Atan2(Node* a, Node* b) {
+    return AddNode(machine()->Float64Atan2(), a, b);
+  }
+  Node* Float64Atanh(Node* a) { return AddNode(machine()->Float64Atanh(), a); }
+  Node* Float64Cbrt(Node* a) { return AddNode(machine()->Float64Cbrt(), a); }
+  Node* Float64Cos(Node* a) { return AddNode(machine()->Float64Cos(), a); }
+  Node* Float64Exp(Node* a) { return AddNode(machine()->Float64Exp(), a); }
+  Node* Float64Expm1(Node* a) { return AddNode(machine()->Float64Expm1(), a); }
+  Node* Float64Log(Node* a) { return AddNode(machine()->Float64Log(), a); }
+  Node* Float64Log1p(Node* a) { return AddNode(machine()->Float64Log1p(), a); }
+  Node* Float64Log10(Node* a) { return AddNode(machine()->Float64Log10(), a); }
+  Node* Float64Log2(Node* a) { return AddNode(machine()->Float64Log2(), a); }
+  Node* Float64Sin(Node* a) { return AddNode(machine()->Float64Sin(), a); }
   Node* Float64Sqrt(Node* a) { return AddNode(machine()->Float64Sqrt(), a); }
+  Node* Float64Tan(Node* a) { return AddNode(machine()->Float64Tan(), a); }
   Node* Float64Equal(Node* a, Node* b) {
     return AddNode(machine()->Float64Equal(), a, b);
   }
@@ -697,6 +712,8 @@
   void Return(Node* v1, Node* v2, Node* v3);
   void Bind(RawMachineLabel* label);
   void Deoptimize(Node* state);
+  void DebugBreak();
+  void Comment(const char* msg);
 
   // Variables.
   Node* Phi(MachineRepresentation rep, Node* n1, Node* n2) {
diff --git a/src/compiler/redundancy-elimination.cc b/src/compiler/redundancy-elimination.cc
new file mode 100644
index 0000000..ae87349
--- /dev/null
+++ b/src/compiler/redundancy-elimination.cc
@@ -0,0 +1,216 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/redundancy-elimination.h"
+
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+RedundancyElimination::RedundancyElimination(Editor* editor, Zone* zone)
+    : AdvancedReducer(editor), node_checks_(zone), zone_(zone) {}
+
+RedundancyElimination::~RedundancyElimination() {}
+
+Reduction RedundancyElimination::Reduce(Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kCheckFloat64Hole:
+    case IrOpcode::kCheckTaggedHole:
+    case IrOpcode::kCheckTaggedPointer:
+    case IrOpcode::kCheckTaggedSigned:
+    case IrOpcode::kCheckedFloat64ToInt32:
+    case IrOpcode::kCheckedInt32Add:
+    case IrOpcode::kCheckedInt32Sub:
+    case IrOpcode::kCheckedTaggedToFloat64:
+    case IrOpcode::kCheckedTaggedToInt32:
+    case IrOpcode::kCheckedUint32ToInt32:
+      return ReduceCheckNode(node);
+    case IrOpcode::kEffectPhi:
+      return ReduceEffectPhi(node);
+    case IrOpcode::kDead:
+      break;
+    case IrOpcode::kStart:
+      return ReduceStart(node);
+    default:
+      return ReduceOtherNode(node);
+  }
+  return NoChange();
+}
+
+// static
+RedundancyElimination::EffectPathChecks*
+RedundancyElimination::EffectPathChecks::Copy(Zone* zone,
+                                              EffectPathChecks const* checks) {
+  return new (zone->New(sizeof(EffectPathChecks))) EffectPathChecks(*checks);
+}
+
+// static
+RedundancyElimination::EffectPathChecks const*
+RedundancyElimination::EffectPathChecks::Empty(Zone* zone) {
+  return new (zone->New(sizeof(EffectPathChecks))) EffectPathChecks(nullptr, 0);
+}
+
+void RedundancyElimination::EffectPathChecks::Merge(
+    EffectPathChecks const* that) {
+  // Change the current check list to a longest common tail of this check
+  // list and the other list.
+
+  // First, we throw away the prefix of the longer list, so that
+  // we have lists of the same length.
+  Check* that_head = that->head_;
+  size_t that_size = that->size_;
+  while (that_size > size_) {
+    that_head = that_head->next;
+    that_size--;
+  }
+  while (size_ > that_size) {
+    head_ = head_->next;
+    size_--;
+  }
+
+  // Then we go through both lists in lock-step until we find
+  // the common tail.
+  while (head_ != that_head) {
+    DCHECK_LT(0u, size_);
+    DCHECK_NOT_NULL(head_);
+    size_--;
+    head_ = head_->next;
+    that_head = that_head->next;
+  }
+}
+
+RedundancyElimination::EffectPathChecks const*
+RedundancyElimination::EffectPathChecks::AddCheck(Zone* zone,
+                                                  Node* node) const {
+  Check* head = new (zone->New(sizeof(Check))) Check(node, head_);
+  return new (zone->New(sizeof(EffectPathChecks)))
+      EffectPathChecks(head, size_ + 1);
+}
+
+namespace {
+
+bool IsCompatibleCheck(Node const* a, Node const* b) {
+  if (a->op() != b->op()) return false;
+  for (int i = a->op()->ValueInputCount(); --i >= 0;) {
+    if (a->InputAt(i) != b->InputAt(i)) return false;
+  }
+  return true;
+}
+
+}  // namespace
+
+Node* RedundancyElimination::EffectPathChecks::LookupCheck(Node* node) const {
+  for (Check const* check = head_; check != nullptr; check = check->next) {
+    if (IsCompatibleCheck(check->node, node)) {
+      DCHECK(!check->node->IsDead());
+      return check->node;
+    }
+  }
+  return nullptr;
+}
+
+RedundancyElimination::EffectPathChecks const*
+RedundancyElimination::PathChecksForEffectNodes::Get(Node* node) const {
+  size_t const id = node->id();
+  if (id < info_for_node_.size()) return info_for_node_[id];
+  return nullptr;
+}
+
+void RedundancyElimination::PathChecksForEffectNodes::Set(
+    Node* node, EffectPathChecks const* checks) {
+  size_t const id = node->id();
+  if (id >= info_for_node_.size()) info_for_node_.resize(id + 1, nullptr);
+  info_for_node_[id] = checks;
+}
+
+Reduction RedundancyElimination::ReduceCheckNode(Node* node) {
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  EffectPathChecks const* checks = node_checks_.Get(effect);
+  // If we do not know anything about the predecessor, do not propagate just yet
+  // because we will have to recompute anyway once we compute the predecessor.
+  if (checks == nullptr) return NoChange();
+  // See if we have another check that dominates us.
+  if (Node* check = checks->LookupCheck(node)) {
+    ReplaceWithValue(node, check);
+    return Replace(check);
+  }
+  // Learn from this check.
+  return UpdateChecks(node, checks->AddCheck(zone(), node));
+}
+
+Reduction RedundancyElimination::ReduceEffectPhi(Node* node) {
+  Node* const control = NodeProperties::GetControlInput(node);
+  if (control->opcode() == IrOpcode::kLoop) {
+    // Here we rely on having only reducible loops:
+    // The loop entry edge always dominates the header, so we can just use
+    // the information from the loop entry edge.
+    return TakeChecksFromFirstEffect(node);
+  }
+  DCHECK_EQ(IrOpcode::kMerge, control->opcode());
+
+  // Shortcut for the case when we do not know anything about some input.
+  int const input_count = node->op()->EffectInputCount();
+  for (int i = 0; i < input_count; ++i) {
+    Node* const effect = NodeProperties::GetEffectInput(node, i);
+    if (node_checks_.Get(effect) == nullptr) return NoChange();
+  }
+
+  // Make a copy of the first input's checks and merge with the checks
+  // from other inputs.
+  EffectPathChecks* checks = EffectPathChecks::Copy(
+      zone(), node_checks_.Get(NodeProperties::GetEffectInput(node, 0)));
+  for (int i = 1; i < input_count; ++i) {
+    Node* const input = NodeProperties::GetEffectInput(node, i);
+    checks->Merge(node_checks_.Get(input));
+  }
+  return UpdateChecks(node, checks);
+}
+
+Reduction RedundancyElimination::ReduceStart(Node* node) {
+  return UpdateChecks(node, EffectPathChecks::Empty(zone()));
+}
+
+Reduction RedundancyElimination::ReduceOtherNode(Node* node) {
+  if (node->op()->EffectInputCount() == 1) {
+    if (node->op()->EffectOutputCount() == 1) {
+      return TakeChecksFromFirstEffect(node);
+    } else {
+      // Effect terminators should be handled specially.
+      return NoChange();
+    }
+  }
+  DCHECK_EQ(0, node->op()->EffectInputCount());
+  DCHECK_EQ(0, node->op()->EffectOutputCount());
+  return NoChange();
+}
+
+Reduction RedundancyElimination::TakeChecksFromFirstEffect(Node* node) {
+  DCHECK_EQ(1, node->op()->EffectOutputCount());
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  EffectPathChecks const* checks = node_checks_.Get(effect);
+  // If we do not know anything about the predecessor, do not propagate just yet
+  // because we will have to recompute anyway once we compute the predecessor.
+  if (checks == nullptr) return NoChange();
+  // We just propagate the information from the effect input (ideally,
+  // we would only revisit effect uses if there is change).
+  return UpdateChecks(node, checks);
+}
+
+Reduction RedundancyElimination::UpdateChecks(Node* node,
+                                              EffectPathChecks const* checks) {
+  EffectPathChecks const* original = node_checks_.Get(node);
+  // Only signal that the {node} has Changed, if the information about {checks}
+  // has changed wrt. the {original}.
+  if (checks != original) {
+    node_checks_.Set(node, checks);
+    return Changed(node);
+  }
+  return NoChange();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/redundancy-elimination.h b/src/compiler/redundancy-elimination.h
new file mode 100644
index 0000000..a4886e4
--- /dev/null
+++ b/src/compiler/redundancy-elimination.h
@@ -0,0 +1,76 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_REDUNDANCY_ELIMINATION_H_
+#define V8_COMPILER_REDUNDANCY_ELIMINATION_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class RedundancyElimination final : public AdvancedReducer {
+ public:
+  RedundancyElimination(Editor* editor, Zone* zone);
+  ~RedundancyElimination() final;
+
+  Reduction Reduce(Node* node) final;
+
+ private:
+  struct Check {
+    Check(Node* node, Check* next) : node(node), next(next) {}
+    Node* node;
+    Check* next;
+  };
+
+  class EffectPathChecks final {
+   public:
+    static EffectPathChecks* Copy(Zone* zone, EffectPathChecks const* checks);
+    static EffectPathChecks const* Empty(Zone* zone);
+    void Merge(EffectPathChecks const* that);
+
+    EffectPathChecks const* AddCheck(Zone* zone, Node* node) const;
+    Node* LookupCheck(Node* node) const;
+
+   private:
+    EffectPathChecks(Check* head, size_t size) : head_(head), size_(size) {}
+
+    // We keep track of the list length so that we can find the longest
+    // common tail easily.
+    Check* head_;
+    size_t size_;
+  };
+
+  class PathChecksForEffectNodes final {
+   public:
+    explicit PathChecksForEffectNodes(Zone* zone) : info_for_node_(zone) {}
+    EffectPathChecks const* Get(Node* node) const;
+    void Set(Node* node, EffectPathChecks const* checks);
+
+   private:
+    ZoneVector<EffectPathChecks const*> info_for_node_;
+  };
+
+  Reduction ReduceCheckNode(Node* node);
+  Reduction ReduceEffectPhi(Node* node);
+  Reduction ReduceStart(Node* node);
+  Reduction ReduceOtherNode(Node* node);
+
+  Reduction TakeChecksFromFirstEffect(Node* node);
+  Reduction UpdateChecks(Node* node, EffectPathChecks const* checks);
+
+  Zone* zone() const { return zone_; }
+
+  PathChecksForEffectNodes node_checks_;
+  Zone* const zone_;
+
+  DISALLOW_COPY_AND_ASSIGN(RedundancyElimination);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_REDUNDANCY_ELIMINATION_H_
diff --git a/src/compiler/register-allocator-verifier.cc b/src/compiler/register-allocator-verifier.cc
index 6746719..2d10de0 100644
--- a/src/compiler/register-allocator-verifier.cc
+++ b/src/compiler/register-allocator-verifier.cc
@@ -160,14 +160,14 @@
     int vreg = unallocated->virtual_register();
     constraint->virtual_register_ = vreg;
     if (unallocated->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
-      constraint->type_ = sequence()->IsFloat(vreg) ? kDoubleSlot : kSlot;
+      constraint->type_ = sequence()->IsFP(vreg) ? kFPSlot : kSlot;
       constraint->value_ = unallocated->fixed_slot_index();
     } else {
       switch (unallocated->extended_policy()) {
         case UnallocatedOperand::ANY:
         case UnallocatedOperand::NONE:
-          if (sequence()->IsFloat(vreg)) {
-            constraint->type_ = kNoneDouble;
+          if (sequence()->IsFP(vreg)) {
+            constraint->type_ = kNoneFP;
           } else {
             constraint->type_ = kNone;
           }
@@ -181,19 +181,19 @@
           }
           constraint->value_ = unallocated->fixed_register_index();
           break;
-        case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
-          constraint->type_ = kFixedDoubleRegister;
+        case UnallocatedOperand::FIXED_FP_REGISTER:
+          constraint->type_ = kFixedFPRegister;
           constraint->value_ = unallocated->fixed_register_index();
           break;
         case UnallocatedOperand::MUST_HAVE_REGISTER:
-          if (sequence()->IsFloat(vreg)) {
-            constraint->type_ = kDoubleRegister;
+          if (sequence()->IsFP(vreg)) {
+            constraint->type_ = kFPRegister;
           } else {
             constraint->type_ = kRegister;
           }
           break;
         case UnallocatedOperand::MUST_HAVE_SLOT:
-          constraint->type_ = sequence()->IsFloat(vreg) ? kDoubleSlot : kSlot;
+          constraint->type_ = sequence()->IsFP(vreg) ? kFPSlot : kSlot;
           break;
         case UnallocatedOperand::SAME_AS_FIRST_INPUT:
           constraint->type_ = kSameAsFirst;
@@ -223,7 +223,7 @@
     case kRegister:
       CHECK(op->IsRegister());
       return;
-    case kDoubleRegister:
+    case kFPRegister:
       CHECK(op->IsFPRegister());
       return;
     case kExplicit:
@@ -232,13 +232,11 @@
     case kFixedRegister:
     case kRegisterAndSlot:
       CHECK(op->IsRegister());
-      CHECK_EQ(LocationOperand::cast(op)->GetRegister().code(),
-               constraint->value_);
+      CHECK_EQ(LocationOperand::cast(op)->register_code(), constraint->value_);
       return;
-    case kFixedDoubleRegister:
+    case kFixedFPRegister:
       CHECK(op->IsFPRegister());
-      CHECK_EQ(LocationOperand::cast(op)->GetDoubleRegister().code(),
-               constraint->value_);
+      CHECK_EQ(LocationOperand::cast(op)->register_code(), constraint->value_);
       return;
     case kFixedSlot:
       CHECK(op->IsStackSlot());
@@ -247,13 +245,13 @@
     case kSlot:
       CHECK(op->IsStackSlot());
       return;
-    case kDoubleSlot:
+    case kFPSlot:
       CHECK(op->IsFPStackSlot());
       return;
     case kNone:
       CHECK(op->IsRegister() || op->IsStackSlot());
       return;
-    case kNoneDouble:
+    case kNoneFP:
       CHECK(op->IsFPRegister() || op->IsFPStackSlot());
       return;
     case kSameAsFirst:
diff --git a/src/compiler/register-allocator-verifier.h b/src/compiler/register-allocator-verifier.h
index 06d9029..72e6e06 100644
--- a/src/compiler/register-allocator-verifier.h
+++ b/src/compiler/register-allocator-verifier.h
@@ -89,7 +89,7 @@
   DISALLOW_COPY_AND_ASSIGN(PendingAssessment);
 };
 
-// FinalAssessmens are associated to operands that we know to be a certain
+// FinalAssessments are associated to operands that we know to be a certain
 // virtual register.
 class FinalAssessment final : public Assessment {
  public:
@@ -175,13 +175,13 @@
     kImmediate,
     kRegister,
     kFixedRegister,
-    kDoubleRegister,
-    kFixedDoubleRegister,
+    kFPRegister,
+    kFixedFPRegister,
     kSlot,
-    kDoubleSlot,
+    kFPSlot,
     kFixedSlot,
     kNone,
-    kNoneDouble,
+    kNoneFP,
     kExplicit,
     kSameAsFirst,
     kRegisterAndSlot
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 4683672..9c8d999 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -33,7 +33,7 @@
 
 int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
                                 RegisterKind kind) {
-  return kind == FP_REGISTERS ? cfg->num_allocatable_aliased_double_registers()
+  return kind == FP_REGISTERS ? cfg->num_allocatable_double_registers()
                               : cfg->num_allocatable_general_registers();
 }
 
@@ -64,25 +64,31 @@
   return code->InstructionAt(block->last_instruction_index());
 }
 
-
-bool IsOutputRegisterOf(Instruction* instr, Register reg) {
+bool IsOutputRegisterOf(Instruction* instr, int code) {
   for (size_t i = 0; i < instr->OutputCount(); i++) {
     InstructionOperand* output = instr->OutputAt(i);
     if (output->IsRegister() &&
-        LocationOperand::cast(output)->GetRegister().is(reg)) {
+        LocationOperand::cast(output)->register_code() == code) {
       return true;
     }
   }
   return false;
 }
 
-
-bool IsOutputDoubleRegisterOf(Instruction* instr, DoubleRegister reg) {
+bool IsOutputFPRegisterOf(Instruction* instr, MachineRepresentation rep,
+                          int code) {
   for (size_t i = 0; i < instr->OutputCount(); i++) {
     InstructionOperand* output = instr->OutputAt(i);
-    if (output->IsFPRegister() &&
-        LocationOperand::cast(output)->GetDoubleRegister().is(reg)) {
-      return true;
+    if (output->IsFPRegister()) {
+      const LocationOperand* op = LocationOperand::cast(output);
+      if (kSimpleFPAliasing) {
+        if (op->register_code() == code) return true;
+      } else {
+        if (RegisterConfiguration::Turbofan()->AreAliases(
+                op->representation(), op->register_code(), rep, code)) {
+          return true;
+        }
+      }
     }
   }
   return false;
@@ -319,11 +325,7 @@
     case UsePositionHintType::kOperand: {
       InstructionOperand* operand =
           reinterpret_cast<InstructionOperand*>(hint_);
-      int assigned_register =
-          operand->IsRegister()
-              ? LocationOperand::cast(operand)->GetRegister().code()
-              : LocationOperand::cast(operand)->GetDoubleRegister().code();
-      *register_code = assigned_register;
+      *register_code = LocationOperand::cast(operand)->register_code();
       return true;
     }
     case UsePositionHintType::kPhi: {
@@ -413,11 +415,6 @@
   return os;
 }
 
-
-const float LiveRange::kInvalidWeight = -1;
-const float LiveRange::kMaxWeight = std::numeric_limits<float>::max();
-
-
 LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
                      TopLevelLiveRange* top_level)
     : relative_id_(relative_id),
@@ -430,10 +427,7 @@
       current_interval_(nullptr),
       last_processed_use_(nullptr),
       current_hint_position_(nullptr),
-      splitting_pointer_(nullptr),
-      size_(kInvalidSize),
-      weight_(kInvalidWeight),
-      group_(nullptr) {
+      splitting_pointer_(nullptr) {
   DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
   bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
           RepresentationField::encode(rep);
@@ -699,10 +693,6 @@
   last_processed_use_ = nullptr;
   current_interval_ = nullptr;
 
-  // Invalidate size and weight of this range. The child range has them
-  // invalid at construction.
-  size_ = kInvalidSize;
-  weight_ = kInvalidWeight;
 #ifdef DEBUG
   VerifyChildStructure();
   result->VerifyChildStructure();
@@ -818,20 +808,6 @@
   return LifetimePosition::Invalid();
 }
 
-
-unsigned LiveRange::GetSize() {
-  if (size_ == kInvalidSize) {
-    size_ = 0;
-    for (const UseInterval* interval = first_interval(); interval != nullptr;
-         interval = interval->next()) {
-      size_ += (interval->end().value() - interval->start().value());
-    }
-  }
-
-  return static_cast<unsigned>(size_);
-}
-
-
 void LiveRange::Print(const RegisterConfiguration* config,
                       bool with_children) const {
   OFStream os(stdout);
@@ -846,9 +822,7 @@
 
 
 void LiveRange::Print(bool with_children) const {
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
-  Print(config, with_children);
+  Print(RegisterConfiguration::Turbofan(), with_children);
 }
 
 
@@ -1280,12 +1254,6 @@
   parent->SetSpillRange(this);
 }
 
-
-int SpillRange::ByteWidth() const {
-  return GetByteWidth(live_ranges_[0]->representation());
-}
-
-
 bool SpillRange::IsIntersectingWith(SpillRange* other) const {
   if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
       this->End() <= other->use_interval_->start() ||
@@ -1388,7 +1356,6 @@
   }
 }
 
-
 RegisterAllocationData::RegisterAllocationData(
     const RegisterConfiguration* config, Zone* zone, Frame* frame,
     InstructionSequence* code, const char* debug_name)
@@ -1404,6 +1371,8 @@
                    allocation_zone()),
       fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
                          allocation_zone()),
+      fixed_float_live_ranges_(this->config()->num_float_registers(), nullptr,
+                               allocation_zone()),
       fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
                                 allocation_zone()),
       spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()),
@@ -1579,17 +1548,32 @@
   return spill_range;
 }
 
-
-void RegisterAllocationData::MarkAllocated(RegisterKind kind, int index) {
-  if (kind == FP_REGISTERS) {
-    assigned_double_registers_->Add(index);
-  } else {
-    DCHECK(kind == GENERAL_REGISTERS);
-    assigned_registers_->Add(index);
+void RegisterAllocationData::MarkAllocated(MachineRepresentation rep,
+                                           int index) {
+  switch (rep) {
+    case MachineRepresentation::kFloat32:
+      if (kSimpleFPAliasing) {
+        assigned_double_registers_->Add(index);
+      } else {
+        int alias_base_index = -1;
+        int aliases = config()->GetAliases(
+            rep, index, MachineRepresentation::kFloat64, &alias_base_index);
+        while (aliases--) {
+          int aliased_reg = alias_base_index + aliases;
+          assigned_double_registers_->Add(aliased_reg);
+        }
+      }
+      break;
+    case MachineRepresentation::kFloat64:
+      assigned_double_registers_->Add(index);
+      break;
+    default:
+      DCHECK(!IsFloatingPoint(rep));
+      assigned_registers_->Add(index);
+      break;
   }
 }
 
-
 bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const {
   return pos.IsFullStart() &&
          code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
@@ -1618,7 +1602,7 @@
     DCHECK(!IsFloatingPoint(rep));
     allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
                                  operand->fixed_register_index());
-  } else if (operand->HasFixedDoubleRegisterPolicy()) {
+  } else if (operand->HasFixedFPRegisterPolicy()) {
     DCHECK(IsFloatingPoint(rep));
     DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register);
     allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
@@ -1903,42 +1887,62 @@
   }
 }
 
-
-int LiveRangeBuilder::FixedDoubleLiveRangeID(int index) {
-  return -index - 1 - config()->num_general_registers();
+int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) {
+  switch (rep) {
+    case MachineRepresentation::kFloat32:
+      return -index - 1 - config()->num_general_registers();
+    case MachineRepresentation::kFloat64:
+      return -index - 1 - config()->num_general_registers() -
+             config()->num_float_registers();
+    default:
+      break;
+  }
+  UNREACHABLE();
+  return 0;
 }
 
-
 TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
   DCHECK(index < config()->num_general_registers());
   TopLevelLiveRange* result = data()->fixed_live_ranges()[index];
   if (result == nullptr) {
-    result = data()->NewLiveRange(FixedLiveRangeID(index),
-                                  InstructionSequence::DefaultRepresentation());
+    MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
+    result = data()->NewLiveRange(FixedLiveRangeID(index), rep);
     DCHECK(result->IsFixed());
     result->set_assigned_register(index);
-    data()->MarkAllocated(GENERAL_REGISTERS, index);
+    data()->MarkAllocated(rep, index);
     data()->fixed_live_ranges()[index] = result;
   }
   return result;
 }
 
-
-TopLevelLiveRange* LiveRangeBuilder::FixedDoubleLiveRangeFor(int index) {
-  DCHECK(index < config()->num_double_registers());
-  TopLevelLiveRange* result = data()->fixed_double_live_ranges()[index];
-  if (result == nullptr) {
-    result = data()->NewLiveRange(FixedDoubleLiveRangeID(index),
-                                  MachineRepresentation::kFloat64);
-    DCHECK(result->IsFixed());
-    result->set_assigned_register(index);
-    data()->MarkAllocated(FP_REGISTERS, index);
-    data()->fixed_double_live_ranges()[index] = result;
+TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
+    int index, MachineRepresentation rep) {
+  TopLevelLiveRange* result = nullptr;
+  if (rep == MachineRepresentation::kFloat64) {
+    DCHECK(index < config()->num_double_registers());
+    result = data()->fixed_double_live_ranges()[index];
+    if (result == nullptr) {
+      result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
+      DCHECK(result->IsFixed());
+      result->set_assigned_register(index);
+      data()->MarkAllocated(rep, index);
+      data()->fixed_double_live_ranges()[index] = result;
+    }
+  } else {
+    DCHECK(rep == MachineRepresentation::kFloat32);
+    DCHECK(index < config()->num_float_registers());
+    result = data()->fixed_float_live_ranges()[index];
+    if (result == nullptr) {
+      result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
+      DCHECK(result->IsFixed());
+      result->set_assigned_register(index);
+      data()->MarkAllocated(rep, index);
+      data()->fixed_float_live_ranges()[index] = result;
+    }
   }
   return result;
 }
 
-
 TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) {
   if (operand->IsUnallocated()) {
     return data()->GetOrCreateLiveRangeFor(
@@ -1950,8 +1954,8 @@
     return FixedLiveRangeFor(
         LocationOperand::cast(operand)->GetRegister().code());
   } else if (operand->IsFPRegister()) {
-    return FixedDoubleLiveRangeFor(
-        LocationOperand::cast(operand)->GetDoubleRegister().code());
+    LocationOperand* op = LocationOperand::cast(operand);
+    return FixedFPLiveRangeFor(op->register_code(), op->representation());
   } else {
     return nullptr;
   }
@@ -2047,7 +2051,7 @@
     if (instr->ClobbersRegisters()) {
       for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) {
         int code = config()->GetAllocatableGeneralCode(i);
-        if (!IsOutputRegisterOf(instr, Register::from_code(code))) {
+        if (!IsOutputRegisterOf(instr, code)) {
           TopLevelLiveRange* range = FixedLiveRangeFor(code);
           range->AddUseInterval(curr_position, curr_position.End(),
                                 allocation_zone());
@@ -2056,15 +2060,29 @@
     }
 
     if (instr->ClobbersDoubleRegisters()) {
-      for (int i = 0; i < config()->num_allocatable_aliased_double_registers();
-           ++i) {
+      for (int i = 0; i < config()->num_allocatable_double_registers(); ++i) {
         int code = config()->GetAllocatableDoubleCode(i);
-        if (!IsOutputDoubleRegisterOf(instr, DoubleRegister::from_code(code))) {
-          TopLevelLiveRange* range = FixedDoubleLiveRangeFor(code);
+        if (!IsOutputFPRegisterOf(instr, MachineRepresentation::kFloat64,
+                                  code)) {
+          TopLevelLiveRange* range =
+              FixedFPLiveRangeFor(code, MachineRepresentation::kFloat64);
           range->AddUseInterval(curr_position, curr_position.End(),
                                 allocation_zone());
         }
       }
+      // Preserve fixed float registers on archs with non-simple aliasing.
+      if (!kSimpleFPAliasing) {
+        for (int i = 0; i < config()->num_allocatable_float_registers(); ++i) {
+          int code = config()->GetAllocatableFloatCode(i);
+          if (!IsOutputFPRegisterOf(instr, MachineRepresentation::kFloat32,
+                                    code)) {
+            TopLevelLiveRange* range =
+                FixedFPLiveRangeFor(code, MachineRepresentation::kFloat32);
+            range->AddUseInterval(curr_position, curr_position.End(),
+                                  allocation_zone());
+          }
+        }
+      }
     }
 
     for (size_t i = 0; i < instr->InputCount(); i++) {
@@ -2184,23 +2202,24 @@
     // block.
     int phi_vreg = phi->virtual_register();
     live->Remove(phi_vreg);
-    InstructionOperand* hint = nullptr;
+    // Select the hint from the first predecessor block that precedes this block
+    // in the rpo ordering. Prefer non-deferred blocks. The enforcement of
+    // hinting in rpo order is required because hint resolution that happens
+    // later in the compiler pipeline visits instructions in reverse rpo,
+    // relying on the fact that phis are encountered before their hints.
+    const Instruction* instr = nullptr;
     const InstructionBlock::Predecessors& predecessors = block->predecessors();
-    const InstructionBlock* predecessor_block =
-        code()->InstructionBlockAt(predecessors[0]);
-    const Instruction* instr = GetLastInstruction(code(), predecessor_block);
-    if (predecessor_block->IsDeferred()) {
-      // "Prefer the hint from the first non-deferred predecessor, if any.
-      for (size_t i = 1; i < predecessors.size(); ++i) {
-        predecessor_block = code()->InstructionBlockAt(predecessors[i]);
-        if (!predecessor_block->IsDeferred()) {
-          instr = GetLastInstruction(code(), predecessor_block);
-          break;
-        }
+    for (size_t i = 0; i < predecessors.size(); ++i) {
+      const InstructionBlock* predecessor_block =
+          code()->InstructionBlockAt(predecessors[i]);
+      if (predecessor_block->rpo_number() < block->rpo_number()) {
+        instr = GetLastInstruction(code(), predecessor_block);
+        if (!predecessor_block->IsDeferred()) break;
       }
     }
     DCHECK_NOT_NULL(instr);
 
+    InstructionOperand* hint = nullptr;
     for (MoveOperands* move : *instr->GetParallelMove(Instruction::END)) {
       InstructionOperand& to = move->destination();
       if (to.IsUnallocated() &&
@@ -2408,7 +2427,6 @@
       allocatable_register_codes_(
           GetAllocatableRegisterCodes(data->config(), kind)) {}
 
-
 LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
     const LiveRange* range, int instruction_index) {
   LifetimePosition ret = LifetimePosition::Invalid();
@@ -2577,14 +2595,6 @@
   range->Spill();
 }
 
-
-const ZoneVector<TopLevelLiveRange*>& RegisterAllocator::GetFixedRegisters()
-    const {
-  return mode() == FP_REGISTERS ? data()->fixed_double_live_ranges()
-                                : data()->fixed_live_ranges();
-}
-
-
 const char* RegisterAllocator::RegisterName(int register_code) const {
   if (mode() == GENERAL_REGISTERS) {
     return data()->config()->GetGeneralRegisterName(register_code);
@@ -2631,11 +2641,16 @@
   SortUnhandled();
   DCHECK(UnhandledIsSorted());
 
-  auto& fixed_ranges = GetFixedRegisters();
-  for (TopLevelLiveRange* current : fixed_ranges) {
-    if (current != nullptr) {
-      DCHECK_EQ(mode(), current->kind());
-      AddToInactive(current);
+  if (mode() == GENERAL_REGISTERS) {
+    for (TopLevelLiveRange* current : data()->fixed_live_ranges()) {
+      if (current != nullptr) AddToInactive(current);
+    }
+  } else {
+    for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
+      if (current != nullptr) AddToInactive(current);
+    }
+    for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
+      if (current != nullptr) AddToInactive(current);
     }
   }
 
@@ -2689,7 +2704,7 @@
 
 void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
                                                        int reg) {
-  data()->MarkAllocated(range->kind(), reg);
+  data()->MarkAllocated(range->representation(), reg);
   range->set_assigned_register(reg);
   range->SetUseHints(reg);
   if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
@@ -2803,18 +2818,37 @@
 
 
 bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
+  int num_regs = num_registers();
+  int num_codes = num_allocatable_registers();
+  const int* codes = allocatable_register_codes();
+  if (!kSimpleFPAliasing &&
+      (current->representation() == MachineRepresentation::kFloat32)) {
+    num_regs = data()->config()->num_float_registers();
+    num_codes = data()->config()->num_allocatable_float_registers();
+    codes = data()->config()->allocatable_float_codes();
+  }
   LifetimePosition free_until_pos[RegisterConfiguration::kMaxFPRegisters];
-
-  for (int i = 0; i < num_registers(); i++) {
+  for (int i = 0; i < num_regs; i++) {
     free_until_pos[i] = LifetimePosition::MaxPosition();
   }
 
   for (LiveRange* cur_active : active_live_ranges()) {
-    free_until_pos[cur_active->assigned_register()] =
-        LifetimePosition::GapFromInstructionIndex(0);
-    TRACE("Register %s is free until pos %d (1)\n",
-          RegisterName(cur_active->assigned_register()),
-          LifetimePosition::GapFromInstructionIndex(0).value());
+    int cur_reg = cur_active->assigned_register();
+    if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
+      free_until_pos[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
+      TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
+            LifetimePosition::GapFromInstructionIndex(0).value());
+    } else {
+      int alias_base_index = -1;
+      int aliases = data()->config()->GetAliases(
+          cur_active->representation(), cur_reg, current->representation(),
+          &alias_base_index);
+      while (aliases--) {
+        int aliased_reg = alias_base_index + aliases;
+        free_until_pos[aliased_reg] =
+            LifetimePosition::GapFromInstructionIndex(0);
+      }
+    }
   }
 
   for (LiveRange* cur_inactive : inactive_live_ranges()) {
@@ -2823,9 +2857,21 @@
         cur_inactive->FirstIntersection(current);
     if (!next_intersection.IsValid()) continue;
     int cur_reg = cur_inactive->assigned_register();
-    free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
-    TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
-          Min(free_until_pos[cur_reg], next_intersection).value());
+    if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
+      free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
+      TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
+            Min(free_until_pos[cur_reg], next_intersection).value());
+    } else {
+      int alias_base_index = -1;
+      int aliases = data()->config()->GetAliases(
+          cur_inactive->representation(), cur_reg, current->representation(),
+          &alias_base_index);
+      while (aliases--) {
+        int aliased_reg = alias_base_index + aliases;
+        free_until_pos[aliased_reg] =
+            Min(free_until_pos[aliased_reg], next_intersection);
+      }
+    }
   }
 
   int hint_register;
@@ -2847,9 +2893,9 @@
   }
 
   // Find the register which stays free for the longest time.
-  int reg = allocatable_register_code(0);
-  for (int i = 1; i < num_allocatable_registers(); ++i) {
-    int code = allocatable_register_code(i);
+  int reg = codes[0];
+  for (int i = 1; i < num_codes; ++i) {
+    int code = codes[i];
     if (free_until_pos[code] > free_until_pos[reg]) {
       reg = code;
     }
@@ -2869,8 +2915,8 @@
     AddToUnhandledSorted(tail);
   }
 
-  // Register reg is available at the range start and is free until
-  // the range end.
+  // Register reg is available at the range start and is free until the range
+  // end.
   DCHECK(pos >= current->End());
   TRACE("Assigning free reg %s to live range %d:%d\n", RegisterName(reg),
         current->TopLevel()->vreg(), current->relative_id());
@@ -2889,26 +2935,58 @@
     return;
   }
 
+  int num_regs = num_registers();
+  int num_codes = num_allocatable_registers();
+  const int* codes = allocatable_register_codes();
+  if (!kSimpleFPAliasing &&
+      (current->representation() == MachineRepresentation::kFloat32)) {
+    num_regs = data()->config()->num_float_registers();
+    num_codes = data()->config()->num_allocatable_float_registers();
+    codes = data()->config()->allocatable_float_codes();
+  }
+
   LifetimePosition use_pos[RegisterConfiguration::kMaxFPRegisters];
   LifetimePosition block_pos[RegisterConfiguration::kMaxFPRegisters];
-
-  for (int i = 0; i < num_registers(); i++) {
+  for (int i = 0; i < num_regs; i++) {
     use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
   }
 
   for (LiveRange* range : active_live_ranges()) {
     int cur_reg = range->assigned_register();
-    if (range->TopLevel()->IsFixed() ||
-        !range->CanBeSpilled(current->Start())) {
-      block_pos[cur_reg] = use_pos[cur_reg] =
-          LifetimePosition::GapFromInstructionIndex(0);
-    } else {
-      UsePosition* next_use =
-          range->NextUsePositionRegisterIsBeneficial(current->Start());
-      if (next_use == nullptr) {
-        use_pos[cur_reg] = range->End();
+    bool is_fixed_or_cant_spill =
+        range->TopLevel()->IsFixed() || !range->CanBeSpilled(current->Start());
+    if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
+      if (is_fixed_or_cant_spill) {
+        block_pos[cur_reg] = use_pos[cur_reg] =
+            LifetimePosition::GapFromInstructionIndex(0);
       } else {
-        use_pos[cur_reg] = next_use->pos();
+        UsePosition* next_use =
+            range->NextUsePositionRegisterIsBeneficial(current->Start());
+        if (next_use == nullptr) {
+          use_pos[cur_reg] = range->End();
+        } else {
+          use_pos[cur_reg] = next_use->pos();
+        }
+      }
+    } else {
+      int alias_base_index = -1;
+      int aliases = data()->config()->GetAliases(
+          range->representation(), cur_reg, current->representation(),
+          &alias_base_index);
+      while (aliases--) {
+        int aliased_reg = alias_base_index + aliases;
+        if (is_fixed_or_cant_spill) {
+          block_pos[aliased_reg] = use_pos[aliased_reg] =
+              LifetimePosition::GapFromInstructionIndex(0);
+        } else {
+          UsePosition* next_use =
+              range->NextUsePositionRegisterIsBeneficial(current->Start());
+          if (next_use == nullptr) {
+            use_pos[aliased_reg] = range->End();
+          } else {
+            use_pos[aliased_reg] = next_use->pos();
+          }
+        }
       }
     }
   }
@@ -2918,17 +2996,36 @@
     LifetimePosition next_intersection = range->FirstIntersection(current);
     if (!next_intersection.IsValid()) continue;
     int cur_reg = range->assigned_register();
-    if (range->TopLevel()->IsFixed()) {
-      block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
-      use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
+    bool is_fixed = range->TopLevel()->IsFixed();
+    if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
+      if (is_fixed) {
+        block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
+        use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
+      } else {
+        use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
+      }
     } else {
-      use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
+      int alias_base_index = -1;
+      int aliases = data()->config()->GetAliases(
+          range->representation(), cur_reg, current->representation(),
+          &alias_base_index);
+      while (aliases--) {
+        int aliased_reg = alias_base_index + aliases;
+        if (is_fixed) {
+          block_pos[aliased_reg] =
+              Min(block_pos[aliased_reg], next_intersection);
+          use_pos[aliased_reg] =
+              Min(block_pos[aliased_reg], use_pos[aliased_reg]);
+        } else {
+          use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection);
+        }
+      }
     }
   }
 
-  int reg = allocatable_register_code(0);
-  for (int i = 1; i < num_allocatable_registers(); ++i) {
-    int code = allocatable_register_code(i);
+  int reg = codes[0];
+  for (int i = 1; i < num_codes; ++i) {
+    int code = codes[i];
     if (use_pos[code] > use_pos[reg]) {
       reg = code;
     }
@@ -2974,45 +3071,61 @@
   LifetimePosition split_pos = current->Start();
   for (size_t i = 0; i < active_live_ranges().size(); ++i) {
     LiveRange* range = active_live_ranges()[i];
-    if (range->assigned_register() == reg) {
-      UsePosition* next_pos = range->NextRegisterPosition(current->Start());
-      LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
-      if (next_pos == nullptr) {
-        SpillAfter(range, spill_pos);
-      } else {
-        // When spilling between spill_pos and next_pos ensure that the range
-        // remains spilled at least until the start of the current live range.
-        // This guarantees that we will not introduce new unhandled ranges that
-        // start before the current range as this violates allocation invariant
-        // and will lead to an inconsistent state of active and inactive
-        // live-ranges: ranges are allocated in order of their start positions,
-        // ranges are retired from active/inactive when the start of the
-        // current live-range is larger than their end.
-        DCHECK(LifetimePosition::ExistsGapPositionBetween(current->Start(),
-                                                          next_pos->pos()));
-        SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
+    if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
+      if (range->assigned_register() != reg) continue;
+    } else {
+      if (!data()->config()->AreAliases(current->representation(), reg,
+                                        range->representation(),
+                                        range->assigned_register())) {
+        continue;
       }
-      ActiveToHandled(range);
-      --i;
     }
+
+    UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+    LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
+    if (next_pos == nullptr) {
+      SpillAfter(range, spill_pos);
+    } else {
+      // When spilling between spill_pos and next_pos ensure that the range
+      // remains spilled at least until the start of the current live range.
+      // This guarantees that we will not introduce new unhandled ranges that
+      // start before the current range as this violates allocation invariants
+      // and will lead to an inconsistent state of active and inactive
+      // live-ranges: ranges are allocated in order of their start positions,
+      // ranges are retired from active/inactive when the start of the
+      // current live-range is larger than their end.
+      DCHECK(LifetimePosition::ExistsGapPositionBetween(current->Start(),
+                                                        next_pos->pos()));
+      SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
+    }
+    ActiveToHandled(range);
+    --i;
   }
 
   for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
     LiveRange* range = inactive_live_ranges()[i];
     DCHECK(range->End() > current->Start());
-    if (range->assigned_register() == reg && !range->TopLevel()->IsFixed()) {
-      LifetimePosition next_intersection = range->FirstIntersection(current);
-      if (next_intersection.IsValid()) {
-        UsePosition* next_pos = range->NextRegisterPosition(current->Start());
-        if (next_pos == nullptr) {
-          SpillAfter(range, split_pos);
-        } else {
-          next_intersection = Min(next_intersection, next_pos->pos());
-          SpillBetween(range, split_pos, next_intersection);
-        }
-        InactiveToHandled(range);
-        --i;
+    if (range->TopLevel()->IsFixed()) continue;
+    if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
+      if (range->assigned_register() != reg) continue;
+    } else {
+      if (!data()->config()->AreAliases(current->representation(), reg,
+                                        range->representation(),
+                                        range->assigned_register()))
+        continue;
+    }
+
+    LifetimePosition next_intersection = range->FirstIntersection(current);
+    if (next_intersection.IsValid()) {
+      UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+      if (next_pos == nullptr) {
+        SpillAfter(range, split_pos);
+      } else {
+        next_intersection = Min(next_intersection, next_pos->pos());
+        SpillBetween(range, split_pos, next_intersection);
       }
+      InactiveToHandled(range);
+      --i;
     }
   }
 }
@@ -3192,8 +3305,7 @@
     if (range == nullptr || range->IsEmpty()) continue;
     // Allocate a new operand referring to the spill slot.
     if (!range->HasSlot()) {
-      int byte_width = range->ByteWidth();
-      int index = data()->frame()->AllocateSpillSlot(byte_width);
+      int index = data()->frame()->AllocateSpillSlot(range->byte_width());
       range->set_assigned_slot(index);
     }
   }
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
index c67d60e..caadcba 100644
--- a/src/compiler/register-allocator.h
+++ b/src/compiler/register-allocator.h
@@ -412,19 +412,9 @@
   void SetUseHints(int register_index);
   void UnsetUseHints() { SetUseHints(kUnassignedRegister); }
 
-  // Used solely by the Greedy Allocator:
-  unsigned GetSize();
-  float weight() const { return weight_; }
-  void set_weight(float weight) { weight_ = weight; }
-  LiveRangeGroup* group() const { return group_; }
-  void set_group(LiveRangeGroup* group) { group_ = group; }
   void Print(const RegisterConfiguration* config, bool with_children) const;
   void Print(bool with_children) const;
 
-  static const int kInvalidSize = -1;
-  static const float kInvalidWeight;
-  static const float kMaxWeight;
-
  private:
   friend class TopLevelLiveRange;
   explicit LiveRange(int relative_id, MachineRepresentation rep,
@@ -461,17 +451,6 @@
   mutable UsePosition* current_hint_position_;
   // Cache the last position splintering stopped at.
   mutable UsePosition* splitting_pointer_;
-  // greedy: the number of LifetimePositions covered by this range. Used to
-  // prioritize selecting live ranges for register assignment, as well as
-  // in weight calculations.
-  int size_;
-
-  // greedy: a metric for resolving conflicts between ranges with an assigned
-  // register and ranges that intersect them and need a register.
-  float weight_;
-
-  // greedy: groupping
-  LiveRangeGroup* group_;
 
   DISALLOW_COPY_AND_ASSIGN(LiveRange);
 };
@@ -483,7 +462,6 @@
   ZoneVector<LiveRange*>& ranges() { return ranges_; }
   const ZoneVector<LiveRange*>& ranges() const { return ranges_; }
 
-  // TODO(mtrofin): populate assigned register and use in weight calculation.
   int assigned_register() const { return assigned_register_; }
   void set_assigned_register(int reg) { assigned_register_ = reg; }
 
@@ -700,8 +678,7 @@
   SpillRange(TopLevelLiveRange* range, Zone* zone);
 
   UseInterval* interval() const { return use_interval_; }
-  // Currently, only 4 or 8 byte slots are supported.
-  int ByteWidth() const;
+
   bool IsEmpty() const { return live_ranges_.empty(); }
   bool TryMerge(SpillRange* other);
   bool HasSlot() const { return assigned_slot_ != kUnassignedSlot; }
@@ -790,6 +767,12 @@
   ZoneVector<TopLevelLiveRange*>& fixed_live_ranges() {
     return fixed_live_ranges_;
   }
+  ZoneVector<TopLevelLiveRange*>& fixed_float_live_ranges() {
+    return fixed_float_live_ranges_;
+  }
+  const ZoneVector<TopLevelLiveRange*>& fixed_float_live_ranges() const {
+    return fixed_float_live_ranges_;
+  }
   ZoneVector<TopLevelLiveRange*>& fixed_double_live_ranges() {
     return fixed_double_live_ranges_;
   }
@@ -801,7 +784,7 @@
   ZoneVector<SpillRange*>& spill_ranges() { return spill_ranges_; }
   DelayedReferences& delayed_references() { return delayed_references_; }
   InstructionSequence* code() const { return code_; }
-  // This zone is for datastructures only needed during register allocation
+  // This zone is for data structures only needed during register allocation
   // phases.
   Zone* allocation_zone() const { return allocation_zone_; }
   // This zone is for InstructionOperands and moves that live beyond register
@@ -832,7 +815,7 @@
   bool ExistsUseWithoutDefinition();
   bool RangesDefinedInDeferredStayInDeferred();
 
-  void MarkAllocated(RegisterKind kind, int index);
+  void MarkAllocated(MachineRepresentation rep, int index);
 
   PhiMapValue* InitializePhiMap(const InstructionBlock* block,
                                 PhiInstruction* phi);
@@ -857,6 +840,7 @@
   ZoneVector<BitVector*> live_out_sets_;
   ZoneVector<TopLevelLiveRange*> live_ranges_;
   ZoneVector<TopLevelLiveRange*> fixed_live_ranges_;
+  ZoneVector<TopLevelLiveRange*> fixed_float_live_ranges_;
   ZoneVector<TopLevelLiveRange*> fixed_double_live_ranges_;
   ZoneVector<SpillRange*> spill_ranges_;
   DelayedReferences delayed_references_;
@@ -933,9 +917,9 @@
   void ProcessLoopHeader(const InstructionBlock* block, BitVector* live);
 
   static int FixedLiveRangeID(int index) { return -index - 1; }
-  int FixedDoubleLiveRangeID(int index);
+  int FixedFPLiveRangeID(int index, MachineRepresentation rep);
   TopLevelLiveRange* FixedLiveRangeFor(int index);
-  TopLevelLiveRange* FixedDoubleLiveRangeFor(int index);
+  TopLevelLiveRange* FixedFPLiveRangeFor(int index, MachineRepresentation rep);
 
   void MapPhiHint(InstructionOperand* operand, UsePosition* use_pos);
   void ResolvePhiHint(InstructionOperand* operand, UsePosition* use_pos);
@@ -969,7 +953,7 @@
 
 class RegisterAllocator : public ZoneObject {
  public:
-  explicit RegisterAllocator(RegisterAllocationData* data, RegisterKind kind);
+  RegisterAllocator(RegisterAllocationData* data, RegisterKind kind);
 
  protected:
   RegisterAllocationData* data() const { return data_; }
@@ -977,8 +961,8 @@
   RegisterKind mode() const { return mode_; }
   int num_registers() const { return num_registers_; }
   int num_allocatable_registers() const { return num_allocatable_registers_; }
-  int allocatable_register_code(int allocatable_index) const {
-    return allocatable_register_codes_[allocatable_index];
+  const int* allocatable_register_codes() const {
+    return allocatable_register_codes_;
   }
 
   // TODO(mtrofin): explain why splitting in gap START is always OK.
@@ -1031,6 +1015,9 @@
   int num_allocatable_registers_;
   const int* allocatable_register_codes_;
 
+ private:
+  bool no_combining_;
+
   DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
 };
 
diff --git a/src/compiler/representation-change.cc b/src/compiler/representation-change.cc
index 180355d..d1aa5af 100644
--- a/src/compiler/representation-change.cc
+++ b/src/compiler/representation-change.cc
@@ -105,46 +105,55 @@
 
 }  // namespace
 
-
 // Changes representation from {output_rep} to {use_rep}. The {truncation}
 // parameter is only used for sanity checking - if the changer cannot figure
 // out signedness for the word32->float64 conversion, then we check that the
 // uses truncate to word32 (so they do not care about signedness).
 Node* RepresentationChanger::GetRepresentationFor(
     Node* node, MachineRepresentation output_rep, Type* output_type,
-    MachineRepresentation use_rep, Truncation truncation) {
+    Node* use_node, UseInfo use_info) {
   if (output_rep == MachineRepresentation::kNone) {
     // The output representation should be set.
-    return TypeError(node, output_rep, output_type, use_rep);
+    return TypeError(node, output_rep, output_type, use_info.representation());
   }
-  if (use_rep == output_rep) {
-    // Representations are the same. That's a no-op.
-    return node;
+
+  // Handle the no-op shortcuts when no checking is necessary.
+  if (use_info.type_check() == TypeCheckKind::kNone ||
+      output_rep != MachineRepresentation::kWord32) {
+    if (use_info.representation() == output_rep) {
+      // Representations are the same. That's a no-op.
+      return node;
+    }
+    if (IsWord(use_info.representation()) && IsWord(output_rep)) {
+      // Both are words less than or equal to 32-bits.
+      // Since loads of integers from memory implicitly sign or zero extend the
+      // value to the full machine word size and stores implicitly truncate,
+      // no representation change is necessary.
+      return node;
+    }
   }
-  if (IsWord(use_rep) && IsWord(output_rep)) {
-    // Both are words less than or equal to 32-bits.
-    // Since loads of integers from memory implicitly sign or zero extend the
-    // value to the full machine word size and stores implicitly truncate,
-    // no representation change is necessary.
-    return node;
-  }
-  switch (use_rep) {
+
+  switch (use_info.representation()) {
     case MachineRepresentation::kTagged:
+      DCHECK(use_info.type_check() == TypeCheckKind::kNone);
       return GetTaggedRepresentationFor(node, output_rep, output_type);
     case MachineRepresentation::kFloat32:
+      DCHECK(use_info.type_check() == TypeCheckKind::kNone);
       return GetFloat32RepresentationFor(node, output_rep, output_type,
-                                         truncation);
+                                         use_info.truncation());
     case MachineRepresentation::kFloat64:
       return GetFloat64RepresentationFor(node, output_rep, output_type,
-                                         truncation);
+                                         use_node, use_info);
     case MachineRepresentation::kBit:
+      DCHECK(use_info.type_check() == TypeCheckKind::kNone);
       return GetBitRepresentationFor(node, output_rep, output_type);
     case MachineRepresentation::kWord8:
     case MachineRepresentation::kWord16:
     case MachineRepresentation::kWord32:
-      return GetWord32RepresentationFor(node, output_rep, output_type,
-                                        truncation);
+      return GetWord32RepresentationFor(node, output_rep, output_type, use_node,
+                                        use_info);
     case MachineRepresentation::kWord64:
+      DCHECK(use_info.type_check() == TypeCheckKind::kNone);
       return GetWord64RepresentationFor(node, output_rep, output_type);
     case MachineRepresentation::kSimd128:  // Fall through.
       // TODO(bbudge) Handle conversions between tagged and untagged.
@@ -156,7 +165,6 @@
   return nullptr;
 }
 
-
 Node* RepresentationChanger::GetTaggedRepresentationFor(
     Node* node, MachineRepresentation output_rep, Type* output_type) {
   // Eagerly fold representation changes for constants.
@@ -271,8 +279,12 @@
     }
   } else if (output_rep == MachineRepresentation::kTagged) {
     if (output_type->Is(Type::NumberOrUndefined())) {
-      op = simplified()
-               ->ChangeTaggedToFloat64();  // tagged -> float64 -> float32
+      // tagged -> float64 -> float32
+      if (output_type->Is(Type::Number())) {
+        op = simplified()->ChangeTaggedToFloat64();
+      } else {
+        op = simplified()->TruncateTaggedToFloat64();
+      }
       node = jsgraph()->graph()->NewNode(op, node);
       op = machine()->TruncateFloat64ToFloat32();
     }
@@ -286,29 +298,31 @@
   return jsgraph()->graph()->NewNode(op, node);
 }
 
-
 Node* RepresentationChanger::GetFloat64RepresentationFor(
     Node* node, MachineRepresentation output_rep, Type* output_type,
-    Truncation truncation) {
+    Node* use_node, UseInfo use_info) {
   // Eagerly fold representation changes for constants.
-  switch (node->opcode()) {
-    case IrOpcode::kNumberConstant:
-      return jsgraph()->Float64Constant(OpParameter<double>(node));
-    case IrOpcode::kInt32Constant:
-      if (output_type->Is(Type::Signed32())) {
-        int32_t value = OpParameter<int32_t>(node);
-        return jsgraph()->Float64Constant(value);
-      } else {
-        DCHECK(output_type->Is(Type::Unsigned32()));
-        uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
-        return jsgraph()->Float64Constant(static_cast<double>(value));
-      }
-    case IrOpcode::kFloat64Constant:
-      return node;  // No change necessary.
-    case IrOpcode::kFloat32Constant:
-      return jsgraph()->Float64Constant(OpParameter<float>(node));
-    default:
-      break;
+  if ((use_info.type_check() == TypeCheckKind::kNone)) {
+    // TODO(jarin) Handle checked constant conversions.
+    switch (node->opcode()) {
+      case IrOpcode::kNumberConstant:
+        return jsgraph()->Float64Constant(OpParameter<double>(node));
+      case IrOpcode::kInt32Constant:
+        if (output_type->Is(Type::Signed32())) {
+          int32_t value = OpParameter<int32_t>(node);
+          return jsgraph()->Float64Constant(value);
+        } else {
+          DCHECK(output_type->Is(Type::Unsigned32()));
+          uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
+          return jsgraph()->Float64Constant(static_cast<double>(value));
+        }
+      case IrOpcode::kFloat64Constant:
+        return node;  // No change necessary.
+      case IrOpcode::kFloat32Constant:
+        return jsgraph()->Float64Constant(OpParameter<float>(node));
+      default:
+        break;
+    }
   }
   // Select the correct X -> Float64 operator.
   const Operator* op = nullptr;
@@ -316,7 +330,7 @@
     if (output_type->Is(Type::Signed32())) {
       op = machine()->ChangeInt32ToFloat64();
     } else if (output_type->Is(Type::Unsigned32()) ||
-               truncation.TruncatesToWord32()) {
+               use_info.truncation().TruncatesToWord32()) {
       // Either the output is uint32 or the uses only care about the
       // low 32 bits (so we can pick uint32 safely).
       op = machine()->ChangeUint32ToFloat64();
@@ -328,8 +342,13 @@
     } else if (output_type->Is(Type::TaggedSigned())) {
       node = InsertChangeTaggedSignedToInt32(node);
       op = machine()->ChangeInt32ToFloat64();
-    } else if (output_type->Is(Type::NumberOrUndefined())) {
+    } else if (output_type->Is(Type::Number())) {
       op = simplified()->ChangeTaggedToFloat64();
+    } else if (output_type->Is(Type::NumberOrUndefined())) {
+      // TODO(jarin) Here we should check that truncation is Number.
+      op = simplified()->TruncateTaggedToFloat64();
+    } else if (use_info.type_check() == TypeCheckKind::kNumberOrUndefined) {
+      op = simplified()->CheckedTaggedToFloat64();
     }
   } else if (output_rep == MachineRepresentation::kFloat32) {
     op = machine()->ChangeFloat32ToFloat64();
@@ -338,29 +357,43 @@
     return TypeError(node, output_rep, output_type,
                      MachineRepresentation::kFloat64);
   }
-  return jsgraph()->graph()->NewNode(op, node);
+  return InsertConversion(node, op, use_node);
 }
 
-
 Node* RepresentationChanger::MakeTruncatedInt32Constant(double value) {
   return jsgraph()->Int32Constant(DoubleToInt32(value));
 }
 
 Node* RepresentationChanger::GetWord32RepresentationFor(
     Node* node, MachineRepresentation output_rep, Type* output_type,
-    Truncation truncation) {
+    Node* use_node, UseInfo use_info) {
   // Eagerly fold representation changes for constants.
   switch (node->opcode()) {
     case IrOpcode::kInt32Constant:
       return node;  // No change necessary.
-    case IrOpcode::kFloat32Constant:
-      return MakeTruncatedInt32Constant(OpParameter<float>(node));
+    case IrOpcode::kFloat32Constant: {
+      float const fv = OpParameter<float>(node);
+      if (use_info.type_check() == TypeCheckKind::kNone ||
+          (use_info.type_check() == TypeCheckKind::kSigned32 &&
+           IsInt32Double(fv))) {
+        return MakeTruncatedInt32Constant(fv);
+      }
+      break;
+    }
     case IrOpcode::kNumberConstant:
-    case IrOpcode::kFloat64Constant:
-      return MakeTruncatedInt32Constant(OpParameter<double>(node));
+    case IrOpcode::kFloat64Constant: {
+      double const fv = OpParameter<double>(node);
+      if (use_info.type_check() == TypeCheckKind::kNone ||
+          (use_info.type_check() == TypeCheckKind::kSigned32 &&
+           IsInt32Double(fv))) {
+        return MakeTruncatedInt32Constant(fv);
+      }
+      break;
+    }
     default:
       break;
   }
+
   // Select the correct X -> Word32 operator.
   const Operator* op = nullptr;
   if (output_rep == MachineRepresentation::kBit) {
@@ -370,8 +403,10 @@
       op = machine()->ChangeFloat64ToUint32();
     } else if (output_type->Is(Type::Signed32())) {
       op = machine()->ChangeFloat64ToInt32();
-    } else if (truncation.TruncatesToWord32()) {
+    } else if (use_info.truncation().TruncatesToWord32()) {
       op = machine()->TruncateFloat64ToWord32();
+    } else if (use_info.type_check() == TypeCheckKind::kSigned32) {
+      op = simplified()->CheckedFloat64ToInt32();
     }
   } else if (output_rep == MachineRepresentation::kFloat32) {
     node = InsertChangeFloat32ToFloat64(node);  // float32 -> float64 -> int32
@@ -379,8 +414,10 @@
       op = machine()->ChangeFloat64ToUint32();
     } else if (output_type->Is(Type::Signed32())) {
       op = machine()->ChangeFloat64ToInt32();
-    } else if (truncation.TruncatesToWord32()) {
+    } else if (use_info.truncation().TruncatesToWord32()) {
       op = machine()->TruncateFloat64ToWord32();
+    } else if (use_info.type_check() == TypeCheckKind::kSigned32) {
+      op = simplified()->CheckedFloat64ToInt32();
     }
   } else if (output_rep == MachineRepresentation::kTagged) {
     if (output_type->Is(Type::TaggedSigned())) {
@@ -389,14 +426,45 @@
       op = simplified()->ChangeTaggedToUint32();
     } else if (output_type->Is(Type::Signed32())) {
       op = simplified()->ChangeTaggedToInt32();
-    } else if (truncation.TruncatesToWord32()) {
+    } else if (use_info.truncation().TruncatesToWord32()) {
       op = simplified()->TruncateTaggedToWord32();
+    } else if (use_info.type_check() == TypeCheckKind::kSigned32) {
+      op = simplified()->CheckedTaggedToInt32();
     }
+  } else if (output_rep == MachineRepresentation::kWord32) {
+    // Only the checked case should get here, the non-checked case is
+    // handled in GetRepresentationFor.
+    DCHECK(use_info.type_check() == TypeCheckKind::kSigned32);
+    if (output_type->Is(Type::Signed32())) {
+      return node;
+    } else if (output_type->Is(Type::Unsigned32())) {
+      op = simplified()->CheckedUint32ToInt32();
+    }
+  } else if (output_rep == MachineRepresentation::kWord8 ||
+             output_rep == MachineRepresentation::kWord16) {
+    DCHECK(use_info.representation() == MachineRepresentation::kWord32);
+    DCHECK(use_info.type_check() == TypeCheckKind::kSigned32);
+    return node;
   }
+
   if (op == nullptr) {
     return TypeError(node, output_rep, output_type,
                      MachineRepresentation::kWord32);
   }
+  return InsertConversion(node, op, use_node);
+}
+
+Node* RepresentationChanger::InsertConversion(Node* node, const Operator* op,
+                                              Node* use_node) {
+  if (op->ControlInputCount() > 0) {
+    // If the operator can deoptimize (which means it has control
+    // input), we need to connect it to the effect and control chains.
+    Node* effect = NodeProperties::GetEffectInput(use_node);
+    Node* control = NodeProperties::GetControlInput(use_node);
+    Node* conversion = jsgraph()->graph()->NewNode(op, node, effect, control);
+    NodeProperties::ReplaceEffectInput(use_node, conversion);
+    return conversion;
+  }
   return jsgraph()->graph()->NewNode(op, node);
 }
 
@@ -426,7 +494,6 @@
   return jsgraph()->graph()->NewNode(op, node);
 }
 
-
 Node* RepresentationChanger::GetWord64RepresentationFor(
     Node* node, MachineRepresentation output_rep, Type* output_type) {
   if (output_rep == MachineRepresentation::kBit) {
@@ -437,18 +504,90 @@
                    MachineRepresentation::kWord64);
 }
 
+Node* RepresentationChanger::GetCheckedWord32RepresentationFor(
+    Node* node, MachineRepresentation output_rep, Type* output_type,
+    Node* use_node, Truncation truncation, TypeCheckKind check) {
+  // TODO(jarin) Eagerly fold constants (or insert hard deopt if the constant
+  // does not pass the check).
+
+  // If the input is already Signed32 in Word32 representation, we do not
+  // have to do anything. (We could fold this into the big if below, but
+  // it feels nicer to have the shortcut return first).
+  if (output_rep == MachineRepresentation::kWord32 ||
+      output_type->Is(Type::Signed32())) {
+    return node;
+  }
+
+  // Select the correct X -> Word32 operator.
+  const Operator* op = nullptr;
+  if (output_rep == MachineRepresentation::kWord32) {
+    if (output_type->Is(Type::Unsigned32())) {
+      op = simplified()->CheckedUint32ToInt32();
+    }
+  } else if (output_rep == MachineRepresentation::kBit) {
+    return node;  // Sloppy comparison -> word32
+  } else if (output_rep == MachineRepresentation::kFloat64) {
+    if (output_type->Is(Type::Unsigned32())) {
+      op = machine()->ChangeFloat64ToUint32();
+    } else if (output_type->Is(Type::Signed32())) {
+      op = machine()->ChangeFloat64ToInt32();
+    } else if (truncation.TruncatesToWord32()) {
+      op = machine()->TruncateFloat64ToWord32();
+    } else if (check == TypeCheckKind::kSigned32) {
+      op = simplified()->CheckedFloat64ToInt32();
+    }
+  } else if (output_rep == MachineRepresentation::kFloat32) {
+    node = InsertChangeFloat32ToFloat64(node);  // float32 -> float64 -> int32
+    if (output_type->Is(Type::Unsigned32())) {
+      op = machine()->ChangeFloat64ToUint32();
+    } else if (output_type->Is(Type::Signed32())) {
+      op = machine()->ChangeFloat64ToInt32();
+    } else if (truncation.TruncatesToWord32()) {
+      op = machine()->TruncateFloat64ToWord32();
+    } else if (check == TypeCheckKind::kSigned32) {
+      op = simplified()->CheckedFloat64ToInt32();
+    }
+  } else if (output_rep == MachineRepresentation::kTagged) {
+    if (output_type->Is(Type::TaggedSigned())) {
+      op = simplified()->ChangeTaggedSignedToInt32();
+    } else if (output_type->Is(Type::Unsigned32())) {
+      op = simplified()->ChangeTaggedToUint32();
+    } else if (output_type->Is(Type::Signed32())) {
+      op = simplified()->ChangeTaggedToInt32();
+    } else if (truncation.TruncatesToWord32()) {
+      op = simplified()->TruncateTaggedToWord32();
+    } else if (check == TypeCheckKind::kSigned32) {
+      op = simplified()->CheckedTaggedToInt32();
+    }
+  }
+  if (op == nullptr) {
+    return TypeError(node, output_rep, output_type,
+                     MachineRepresentation::kWord32);
+  }
+  if (op->ControlInputCount() > 0) {
+    // If the operator can deoptimize (which means it has control
+    // input), we need to connect it to the effect and control chains.
+    UNIMPLEMENTED();
+  }
+  return jsgraph()->graph()->NewNode(op, node);
+}
 
 const Operator* RepresentationChanger::Int32OperatorFor(
     IrOpcode::Value opcode) {
   switch (opcode) {
+    case IrOpcode::kSpeculativeNumberAdd:  // Fall through.
     case IrOpcode::kNumberAdd:
       return machine()->Int32Add();
+    case IrOpcode::kSpeculativeNumberSubtract:  // Fall through.
     case IrOpcode::kNumberSubtract:
       return machine()->Int32Sub();
+    case IrOpcode::kSpeculativeNumberMultiply:
     case IrOpcode::kNumberMultiply:
       return machine()->Int32Mul();
+    case IrOpcode::kSpeculativeNumberDivide:
     case IrOpcode::kNumberDivide:
       return machine()->Int32Div();
+    case IrOpcode::kSpeculativeNumberModulus:
     case IrOpcode::kNumberModulus:
       return machine()->Int32Mod();
     case IrOpcode::kNumberBitwiseOr:
@@ -458,10 +597,13 @@
     case IrOpcode::kNumberBitwiseAnd:
       return machine()->Word32And();
     case IrOpcode::kNumberEqual:
+    case IrOpcode::kSpeculativeNumberEqual:
       return machine()->Word32Equal();
     case IrOpcode::kNumberLessThan:
+    case IrOpcode::kSpeculativeNumberLessThan:
       return machine()->Int32LessThan();
     case IrOpcode::kNumberLessThanOrEqual:
+    case IrOpcode::kSpeculativeNumberLessThanOrEqual:
       return machine()->Int32LessThanOrEqual();
     default:
       UNREACHABLE();
@@ -469,6 +611,18 @@
   }
 }
 
+const Operator* RepresentationChanger::Int32OverflowOperatorFor(
+    IrOpcode::Value opcode) {
+  switch (opcode) {
+    case IrOpcode::kSpeculativeNumberAdd:  // Fall through.
+      return simplified()->CheckedInt32Add();
+    case IrOpcode::kSpeculativeNumberSubtract:  // Fall through.
+      return simplified()->CheckedInt32Sub();
+    default:
+      UNREACHABLE();
+      return nullptr;
+  }
+}
 
 const Operator* RepresentationChanger::Uint32OperatorFor(
     IrOpcode::Value opcode) {
@@ -477,17 +631,23 @@
       return machine()->Int32Add();
     case IrOpcode::kNumberSubtract:
       return machine()->Int32Sub();
+    case IrOpcode::kSpeculativeNumberMultiply:
     case IrOpcode::kNumberMultiply:
       return machine()->Int32Mul();
+    case IrOpcode::kSpeculativeNumberDivide:
     case IrOpcode::kNumberDivide:
       return machine()->Uint32Div();
+    case IrOpcode::kSpeculativeNumberModulus:
     case IrOpcode::kNumberModulus:
       return machine()->Uint32Mod();
     case IrOpcode::kNumberEqual:
+    case IrOpcode::kSpeculativeNumberEqual:
       return machine()->Word32Equal();
     case IrOpcode::kNumberLessThan:
+    case IrOpcode::kSpeculativeNumberLessThan:
       return machine()->Uint32LessThan();
     case IrOpcode::kNumberLessThanOrEqual:
+    case IrOpcode::kSpeculativeNumberLessThanOrEqual:
       return machine()->Uint32LessThanOrEqual();
     case IrOpcode::kNumberClz32:
       return machine()->Word32Clz();
@@ -503,22 +663,64 @@
 const Operator* RepresentationChanger::Float64OperatorFor(
     IrOpcode::Value opcode) {
   switch (opcode) {
+    case IrOpcode::kSpeculativeNumberAdd:
     case IrOpcode::kNumberAdd:
       return machine()->Float64Add();
+    case IrOpcode::kSpeculativeNumberSubtract:
     case IrOpcode::kNumberSubtract:
       return machine()->Float64Sub();
+    case IrOpcode::kSpeculativeNumberMultiply:
     case IrOpcode::kNumberMultiply:
       return machine()->Float64Mul();
+    case IrOpcode::kSpeculativeNumberDivide:
     case IrOpcode::kNumberDivide:
       return machine()->Float64Div();
+    case IrOpcode::kSpeculativeNumberModulus:
     case IrOpcode::kNumberModulus:
       return machine()->Float64Mod();
     case IrOpcode::kNumberEqual:
+    case IrOpcode::kSpeculativeNumberEqual:
       return machine()->Float64Equal();
     case IrOpcode::kNumberLessThan:
+    case IrOpcode::kSpeculativeNumberLessThan:
       return machine()->Float64LessThan();
     case IrOpcode::kNumberLessThanOrEqual:
+    case IrOpcode::kSpeculativeNumberLessThanOrEqual:
       return machine()->Float64LessThanOrEqual();
+    case IrOpcode::kNumberAbs:
+      return machine()->Float64Abs();
+    case IrOpcode::kNumberAtan:
+      return machine()->Float64Atan();
+    case IrOpcode::kNumberAtan2:
+      return machine()->Float64Atan2();
+    case IrOpcode::kNumberCos:
+      return machine()->Float64Cos();
+    case IrOpcode::kNumberExp:
+      return machine()->Float64Exp();
+    case IrOpcode::kNumberFround:
+      return machine()->TruncateFloat64ToFloat32();
+    case IrOpcode::kNumberAtanh:
+      return machine()->Float64Atanh();
+    case IrOpcode::kNumberLog:
+      return machine()->Float64Log();
+    case IrOpcode::kNumberLog1p:
+      return machine()->Float64Log1p();
+    case IrOpcode::kNumberLog2:
+      return machine()->Float64Log2();
+    case IrOpcode::kNumberLog10:
+      return machine()->Float64Log10();
+    case IrOpcode::kNumberSin:
+      return machine()->Float64Sin();
+    case IrOpcode::kNumberTan:
+      return machine()->Float64Tan();
+    case IrOpcode::kNumberSqrt:
+      return machine()->Float64Sqrt();
+    case IrOpcode::kNumberCbrt:
+      return machine()->Float64Cbrt();
+    case IrOpcode::kNumberExpm1:
+      return machine()->Float64Expm1();
+    case IrOpcode::kNumberSilenceNaN:
+      return machine()->Float64SilenceNaN();
     default:
       UNREACHABLE();
       return nullptr;
diff --git a/src/compiler/representation-change.h b/src/compiler/representation-change.h
index 839335d..8a38644 100644
--- a/src/compiler/representation-change.h
+++ b/src/compiler/representation-change.h
@@ -73,6 +73,86 @@
   static bool LessGeneral(TruncationKind rep1, TruncationKind rep2);
 };
 
+enum class TypeCheckKind : uint8_t {
+  kNone,
+  kSigned32,
+  kNumberOrUndefined,
+  kNumber
+};
+
+// The {UseInfo} class is used to describe a use of an input of a node.
+//
+// This information is used in two different ways, based on the phase:
+//
+// 1. During propagation, the use info is used to inform the input node
+//    about what part of the input is used (we call this truncation) and what
+//    is the preferred representation.
+//
+// 2. During lowering, the use info is used to properly convert the input
+//    to the preferred representation. The preferred representation might be
+//    insufficient to do the conversion (e.g. word32->float64 conv), so we also
+//    need the signedness information to produce the correct value.
+class UseInfo {
+ public:
+  UseInfo(MachineRepresentation representation, Truncation truncation,
+          TypeCheckKind type_check = TypeCheckKind::kNone)
+      : representation_(representation),
+        truncation_(truncation),
+        type_check_(type_check) {}
+  static UseInfo TruncatingWord32() {
+    return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
+  }
+  static UseInfo TruncatingWord64() {
+    return UseInfo(MachineRepresentation::kWord64, Truncation::Word64());
+  }
+  static UseInfo Bool() {
+    return UseInfo(MachineRepresentation::kBit, Truncation::Bool());
+  }
+  static UseInfo TruncatingFloat32() {
+    return UseInfo(MachineRepresentation::kFloat32, Truncation::Float32());
+  }
+  static UseInfo TruncatingFloat64() {
+    return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64());
+  }
+  static UseInfo PointerInt() {
+    return kPointerSize == 4 ? TruncatingWord32() : TruncatingWord64();
+  }
+  static UseInfo AnyTagged() {
+    return UseInfo(MachineRepresentation::kTagged, Truncation::Any());
+  }
+
+  // Possibly deoptimizing conversions.
+  static UseInfo CheckedSigned32AsWord32() {
+    return UseInfo(MachineRepresentation::kWord32, Truncation::Any(),
+                   TypeCheckKind::kSigned32);
+  }
+  static UseInfo CheckedNumberOrUndefinedAsFloat64() {
+    return UseInfo(MachineRepresentation::kFloat64, Truncation::Any(),
+                   TypeCheckKind::kNumberOrUndefined);
+  }
+
+  // Undetermined representation.
+  static UseInfo Any() {
+    return UseInfo(MachineRepresentation::kNone, Truncation::Any());
+  }
+  static UseInfo AnyTruncatingToBool() {
+    return UseInfo(MachineRepresentation::kNone, Truncation::Bool());
+  }
+
+  // Value not used.
+  static UseInfo None() {
+    return UseInfo(MachineRepresentation::kNone, Truncation::None());
+  }
+
+  MachineRepresentation representation() const { return representation_; }
+  Truncation truncation() const { return truncation_; }
+  TypeCheckKind type_check() const { return type_check_; }
+
+ private:
+  MachineRepresentation representation_;
+  Truncation truncation_;
+  TypeCheckKind type_check_;
+};
 
 // Contains logic related to changing the representation of values for constants
 // and other nodes, as well as lowering Simplified->Machine operators.
@@ -90,9 +170,10 @@
   // out signedness for the word32->float64 conversion, then we check that the
   // uses truncate to word32 (so they do not care about signedness).
   Node* GetRepresentationFor(Node* node, MachineRepresentation output_rep,
-                             Type* output_type, MachineRepresentation use_rep,
-                             Truncation truncation = Truncation::None());
+                             Type* output_type, Node* use_node,
+                             UseInfo use_info);
   const Operator* Int32OperatorFor(IrOpcode::Value opcode);
+  const Operator* Int32OverflowOperatorFor(IrOpcode::Value opcode);
   const Operator* Uint32OperatorFor(IrOpcode::Value opcode);
   const Operator* Float64OperatorFor(IrOpcode::Value opcode);
 
@@ -122,13 +203,20 @@
                                     Type* output_type, Truncation truncation);
   Node* GetFloat64RepresentationFor(Node* node,
                                     MachineRepresentation output_rep,
-                                    Type* output_type, Truncation truncation);
+                                    Type* output_type, Node* use_node,
+                                    UseInfo use_info);
   Node* GetWord32RepresentationFor(Node* node, MachineRepresentation output_rep,
-                                   Type* output_type, Truncation truncation);
+                                   Type* output_type, Node* use_node,
+                                   UseInfo use_info);
   Node* GetBitRepresentationFor(Node* node, MachineRepresentation output_rep,
                                 Type* output_type);
   Node* GetWord64RepresentationFor(Node* node, MachineRepresentation output_rep,
                                    Type* output_type);
+  Node* GetCheckedWord32RepresentationFor(Node* node,
+                                          MachineRepresentation output_rep,
+                                          Type* output_type, Node* use_node,
+                                          Truncation truncation,
+                                          TypeCheckKind check);
   Node* TypeError(Node* node, MachineRepresentation output_rep,
                   Type* output_type, MachineRepresentation use);
   Node* MakeTruncatedInt32Constant(double value);
@@ -138,6 +226,8 @@
   Node* InsertChangeTaggedSignedToInt32(Node* node);
   Node* InsertChangeTaggedToFloat64(Node* node);
 
+  Node* InsertConversion(Node* node, const Operator* op, Node* use_node);
+
   JSGraph* jsgraph() const { return jsgraph_; }
   Isolate* isolate() const { return isolate_; }
   Factory* factory() const { return isolate()->factory(); }
diff --git a/src/compiler/s390/OWNERS b/src/compiler/s390/OWNERS
index eb007cb..752e8e3 100644
--- a/src/compiler/s390/OWNERS
+++ b/src/compiler/s390/OWNERS
@@ -3,3 +3,4 @@
 joransiu@ca.ibm.com
 mbrandy@us.ibm.com
 michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/src/compiler/s390/code-generator-s390.cc b/src/compiler/s390/code-generator-s390.cc
index fece596..ac24529 100644
--- a/src/compiler/s390/code-generator-s390.cc
+++ b/src/compiler/s390/code-generator-s390.cc
@@ -385,6 +385,33 @@
     __ MovFromFloatResult(i.OutputDoubleRegister());                          \
   } while (0)
 
+#define ASSEMBLE_IEEE754_UNOP(name)                                            \
+  do {                                                                         \
+    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
+    /* and generate a CallAddress instruction instead. */                      \
+    FrameScope scope(masm(), StackFrame::MANUAL);                              \
+    __ PrepareCallCFunction(0, 1, kScratchReg);                                \
+    __ MovToFloatParameter(i.InputDoubleRegister(0));                          \
+    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
+                     0, 1);                                                    \
+    /* Move the result in the double result register. */                       \
+    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
+  } while (0)
+
+#define ASSEMBLE_IEEE754_BINOP(name)                                           \
+  do {                                                                         \
+    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
+    /* and generate a CallAddress instruction instead. */                      \
+    FrameScope scope(masm(), StackFrame::MANUAL);                              \
+    __ PrepareCallCFunction(0, 2, kScratchReg);                                \
+    __ MovToFloatParameters(i.InputDoubleRegister(0),                          \
+                            i.InputDoubleRegister(1));                         \
+    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
+                     0, 2);                                                    \
+    /* Move the result in the double result register. */                       \
+    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
+  } while (0)
+
 #define ASSEMBLE_FLOAT_MAX(double_scratch_reg, general_scratch_reg) \
   do {                                                              \
     Label ge, done;                                                 \
@@ -720,6 +747,9 @@
     case kArchTableSwitch:
       AssembleArchTableSwitch(instr);
       break;
+    case kArchDebugBreak:
+      __ stop("kArchDebugBreak");
+      break;
     case kArchNop:
     case kArchThrowTerminator:
       // don't emit code for nops.
@@ -1216,6 +1246,45 @@
     case kS390_ModDouble:
       ASSEMBLE_FLOAT_MODULO();
       break;
+    case kIeee754Float64Atan:
+      ASSEMBLE_IEEE754_UNOP(atan);
+      break;
+    case kIeee754Float64Atan2:
+      ASSEMBLE_IEEE754_BINOP(atan2);
+      break;
+    case kIeee754Float64Tan:
+      ASSEMBLE_IEEE754_UNOP(tan);
+      break;
+    case kIeee754Float64Cbrt:
+      ASSEMBLE_IEEE754_UNOP(cbrt);
+      break;
+    case kIeee754Float64Sin:
+      ASSEMBLE_IEEE754_UNOP(sin);
+      break;
+    case kIeee754Float64Cos:
+      ASSEMBLE_IEEE754_UNOP(cos);
+      break;
+    case kIeee754Float64Exp:
+      ASSEMBLE_IEEE754_UNOP(exp);
+      break;
+    case kIeee754Float64Expm1:
+      ASSEMBLE_IEEE754_UNOP(expm1);
+      break;
+    case kIeee754Float64Atanh:
+      ASSEMBLE_IEEE754_UNOP(atanh);
+      break;
+    case kIeee754Float64Log:
+      ASSEMBLE_IEEE754_UNOP(log);
+      break;
+    case kIeee754Float64Log1p:
+      ASSEMBLE_IEEE754_UNOP(log1p);
+      break;
+    case kIeee754Float64Log2:
+      ASSEMBLE_IEEE754_UNOP(log2);
+      break;
+    case kIeee754Float64Log10:
+      ASSEMBLE_IEEE754_UNOP(log10);
+      break;
     case kS390_Neg:
       __ LoadComplementRR(i.OutputRegister(), i.InputRegister(0));
       break;
@@ -1301,6 +1370,12 @@
       }
       break;
 #endif
+    case kS390_Float64SilenceNaN: {
+      DoubleRegister value = i.InputDoubleRegister(0);
+      DoubleRegister result = i.OutputDoubleRegister();
+      __ CanonicalizeNaN(result, value);
+      break;
+    }
     case kS390_Push:
       if (instr->InputAt(0)->IsFPRegister()) {
         __ lay(sp, MemOperand(sp, -kDoubleSize));
@@ -1315,8 +1390,13 @@
       int num_slots = i.InputInt32(1);
       __ lay(sp, MemOperand(sp, -num_slots * kPointerSize));
       if (instr->InputAt(0)->IsFPRegister()) {
-        __ StoreDouble(i.InputDoubleRegister(0),
-                 MemOperand(sp));
+        LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+        if (op->representation() == MachineRepresentation::kFloat64) {
+          __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
+        } else {
+          DCHECK(op->representation() == MachineRepresentation::kFloat32);
+          __ StoreFloat32(i.InputDoubleRegister(0), MemOperand(sp));
+        }
       } else {
         __ StoreP(i.InputRegister(0),
                   MemOperand(sp));
@@ -1326,8 +1406,15 @@
     case kS390_StoreToStackSlot: {
       int slot = i.InputInt32(1);
       if (instr->InputAt(0)->IsFPRegister()) {
-        __ StoreDouble(i.InputDoubleRegister(0),
-                       MemOperand(sp, slot * kPointerSize));
+        LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+        if (op->representation() == MachineRepresentation::kFloat64) {
+          __ StoreDouble(i.InputDoubleRegister(0),
+                         MemOperand(sp, slot * kPointerSize));
+        } else {
+          DCHECK(op->representation() == MachineRepresentation::kFloat32);
+          __ StoreFloat32(i.InputDoubleRegister(0),
+                          MemOperand(sp, slot * kPointerSize));
+        }
       } else {
         __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
       }
@@ -1941,6 +2028,7 @@
           if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
 #else
           if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
               src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
 #endif
             __ mov(dst, Operand(src.ToInt32(), src.rmode()));
@@ -1950,7 +2038,8 @@
           break;
         case Constant::kInt64:
 #if V8_TARGET_ARCH_S390X
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
             __ mov(dst, Operand(src.ToInt64(), src.rmode()));
           } else {
             DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
@@ -2014,17 +2103,33 @@
       __ Move(dst, src);
     } else {
       DCHECK(destination->IsFPStackSlot());
-      __ StoreDouble(src, g.ToMemOperand(destination));
+      LocationOperand* op = LocationOperand::cast(source);
+      if (op->representation() == MachineRepresentation::kFloat64) {
+        __ StoreDouble(src, g.ToMemOperand(destination));
+      } else {
+        __ StoreFloat32(src, g.ToMemOperand(destination));
+      }
     }
   } else if (source->IsFPStackSlot()) {
     DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
     MemOperand src = g.ToMemOperand(source);
     if (destination->IsFPRegister()) {
-      __ LoadDouble(g.ToDoubleRegister(destination), src);
+      LocationOperand* op = LocationOperand::cast(source);
+      if (op->representation() == MachineRepresentation::kFloat64) {
+        __ LoadDouble(g.ToDoubleRegister(destination), src);
+      } else {
+        __ LoadFloat32(g.ToDoubleRegister(destination), src);
+      }
     } else {
+      LocationOperand* op = LocationOperand::cast(source);
       DoubleRegister temp = kScratchDoubleReg;
-      __ LoadDouble(temp, src);
-      __ StoreDouble(temp, g.ToMemOperand(destination));
+      if (op->representation() == MachineRepresentation::kFloat64) {
+        __ LoadDouble(temp, src);
+        __ StoreDouble(temp, g.ToMemOperand(destination));
+      } else {
+        __ LoadFloat32(temp, src);
+        __ StoreFloat32(temp, g.ToMemOperand(destination));
+      }
     }
   } else {
     UNREACHABLE();
diff --git a/src/compiler/s390/instruction-codes-s390.h b/src/compiler/s390/instruction-codes-s390.h
index a54b2ed..b53136c 100644
--- a/src/compiler/s390/instruction-codes-s390.h
+++ b/src/compiler/s390/instruction-codes-s390.h
@@ -107,6 +107,7 @@
   V(S390_Float32ToInt32)           \
   V(S390_Float32ToUint32)          \
   V(S390_Float32ToDouble)          \
+  V(S390_Float64SilenceNaN)        \
   V(S390_DoubleToInt32)            \
   V(S390_DoubleToUint32)           \
   V(S390_DoubleToInt64)            \
diff --git a/src/compiler/s390/instruction-scheduler-s390.cc b/src/compiler/s390/instruction-scheduler-s390.cc
index d187227..5b9722e 100644
--- a/src/compiler/s390/instruction-scheduler-s390.cc
+++ b/src/compiler/s390/instruction-scheduler-s390.cc
@@ -104,6 +104,7 @@
     case kS390_Float32ToUint32:
     case kS390_Float32ToUint64:
     case kS390_Float32ToDouble:
+    case kS390_Float64SilenceNaN:
     case kS390_DoubleToInt32:
     case kS390_DoubleToUint32:
     case kS390_Float32ToInt64:
diff --git a/src/compiler/s390/instruction-selector-s390.cc b/src/compiler/s390/instruction-selector-s390.cc
index 00782d1..1b1bd2f 100644
--- a/src/compiler/s390/instruction-selector-s390.cc
+++ b/src/compiler/s390/instruction-selector-s390.cc
@@ -1179,6 +1179,10 @@
 
 void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
 
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+  VisitRR(this, kS390_Float64SilenceNaN, node);
+}
+
 void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
 
 void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
@@ -1195,6 +1199,21 @@
   VisitRR(this, kS390_SqrtFloat, node);
 }
 
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+                                                  InstructionCode opcode) {
+  S390OperandGenerator g(this);
+  Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0))
+      ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+                                                   InstructionCode opcode) {
+  S390OperandGenerator g(this);
+  Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
+       g.UseFixed(node->InputAt(1), d2))
+      ->MarkAsCall();
+}
+
 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
   VisitRR(this, kS390_SqrtDouble, node);
 }
@@ -1235,6 +1254,10 @@
   UNREACHABLE();
 }
 
+void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
+
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -1822,6 +1845,13 @@
          MachineOperatorBuilder::kWord64Popcnt;
 }
 
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+  return MachineOperatorBuilder::AlignmentRequirements::
+      FullUnalignedAccessSupport();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index a76d3e2..c56494c 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -15,6 +15,7 @@
 #include "src/compiler/linkage.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
+#include "src/compiler/operation-typer.h"
 #include "src/compiler/operator-properties.h"
 #include "src/compiler/representation-change.h"
 #include "src/compiler/simplified-operator.h"
@@ -62,63 +63,6 @@
 
 namespace {
 
-// The {UseInfo} class is used to describe a use of an input of a node.
-//
-// This information is used in two different ways, based on the phase:
-//
-// 1. During propagation, the use info is used to inform the input node
-//    about what part of the input is used (we call this truncation) and what
-//    is the preferred representation.
-//
-// 2. During lowering, the use info is used to properly convert the input
-//    to the preferred representation. The preferred representation might be
-//    insufficient to do the conversion (e.g. word32->float64 conv), so we also
-//    need the signedness information to produce the correct value.
-class UseInfo {
- public:
-  UseInfo(MachineRepresentation preferred, Truncation truncation)
-      : preferred_(preferred), truncation_(truncation) {}
-  static UseInfo TruncatingWord32() {
-    return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
-  }
-  static UseInfo TruncatingWord64() {
-    return UseInfo(MachineRepresentation::kWord64, Truncation::Word64());
-  }
-  static UseInfo Bool() {
-    return UseInfo(MachineRepresentation::kBit, Truncation::Bool());
-  }
-  static UseInfo TruncatingFloat32() {
-    return UseInfo(MachineRepresentation::kFloat32, Truncation::Float32());
-  }
-  static UseInfo TruncatingFloat64() {
-    return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64());
-  }
-  static UseInfo PointerInt() {
-    return kPointerSize == 4 ? TruncatingWord32() : TruncatingWord64();
-  }
-  static UseInfo AnyTagged() {
-    return UseInfo(MachineRepresentation::kTagged, Truncation::Any());
-  }
-
-  // Undetermined representation.
-  static UseInfo Any() {
-    return UseInfo(MachineRepresentation::kNone, Truncation::Any());
-  }
-  static UseInfo None() {
-    return UseInfo(MachineRepresentation::kNone, Truncation::None());
-  }
-  static UseInfo AnyTruncatingToBool() {
-    return UseInfo(MachineRepresentation::kNone, Truncation::Bool());
-  }
-
-  MachineRepresentation preferred() const { return preferred_; }
-  Truncation truncation() const { return truncation_; }
-
- private:
-  MachineRepresentation preferred_;
-  Truncation truncation_;
-};
-
 
 UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
   switch (rep) {
@@ -223,7 +167,8 @@
   ZoneVector<UseInfo> input_use_infos_;
 
   static bool IsUseLessGeneral(UseInfo use1, UseInfo use2) {
-    return MachineRepresentationIsSubtype(use1.preferred(), use2.preferred()) &&
+    return MachineRepresentationIsSubtype(use1.representation(),
+                                          use2.representation()) &&
            use1.truncation().IsLessGeneralThan(use2.truncation());
   }
 };
@@ -246,27 +191,43 @@
       return truncation_ != old_truncation;
     }
 
-    void set_queued(bool value) { queued_ = value; }
-    bool queued() const { return queued_; }
-    void set_visited() { visited_ = true; }
-    bool visited() const { return visited_; }
+    void set_queued() { state_ = kQueued; }
+    void set_visited() { state_ = kVisited; }
+    void set_pushed() { state_ = kPushed; }
+    void reset_state() { state_ = kUnvisited; }
+    bool visited() const { return state_ == kVisited; }
+    bool queued() const { return state_ == kQueued; }
+    bool unvisited() const { return state_ == kUnvisited; }
     Truncation truncation() const { return truncation_; }
     void set_output(MachineRepresentation output) { representation_ = output; }
 
     MachineRepresentation representation() const { return representation_; }
 
+    // Helpers for feedback typing.
+    void set_feedback_type(Type* type) { feedback_type_ = type; }
+    Type* feedback_type() { return feedback_type_; }
+    void set_weakened() { weakened_ = true; }
+    bool weakened() { return weakened_; }
+    TypeCheckKind type_check() { return type_check_; }
+    void set_type_check(TypeCheckKind type_check) { type_check_ = type_check; }
+
    private:
-    bool queued_ = false;   // Bookkeeping for the traversal.
-    bool visited_ = false;  // Bookkeeping for the traversal.
+    enum State : uint8_t { kUnvisited, kPushed, kVisited, kQueued };
+    State state_ = kUnvisited;
     MachineRepresentation representation_ =
         MachineRepresentation::kNone;             // Output representation.
     Truncation truncation_ = Truncation::None();  // Information about uses.
+    TypeCheckKind type_check_ = TypeCheckKind::kNone;  // Runtime check kind.
+
+    Type* feedback_type_ = nullptr;
+    bool weakened_ = false;
   };
 
   RepresentationSelector(JSGraph* jsgraph, Zone* zone,
                          RepresentationChanger* changer,
                          SourcePositionTable* source_positions)
       : jsgraph_(jsgraph),
+        zone_(zone),
         count_(jsgraph->graph()->NodeCount()),
         info_(count_, zone),
 #ifdef DEBUG
@@ -277,11 +238,320 @@
         phase_(PROPAGATE),
         changer_(changer),
         queue_(zone),
+        typing_stack_(zone),
         source_positions_(source_positions),
-        type_cache_(TypeCache::Get()) {
+        type_cache_(TypeCache::Get()),
+        op_typer_(jsgraph->isolate(), graph_zone()) {
   }
 
-  void Run(SimplifiedLowering* lowering) {
+  // Forward propagation of types from type feedback.
+  void RunTypePropagationPhase() {
+    DCHECK(typing_stack_.empty());
+
+    typing_stack_.push({graph()->end(), 0});
+    GetInfo(graph()->end())->set_pushed();
+    while (!typing_stack_.empty()) {
+      NodeState& current = typing_stack_.top();
+
+      // If there is an unvisited input, push it and continue.
+      bool pushed_unvisited = false;
+      while (current.input_index < current.node->InputCount()) {
+        Node* input = current.node->InputAt(current.input_index);
+        NodeInfo* input_info = GetInfo(input);
+        current.input_index++;
+        if (input_info->unvisited()) {
+          input_info->set_pushed();
+          typing_stack_.push({input, 0});
+          pushed_unvisited = true;
+          break;
+        }
+      }
+      if (pushed_unvisited) continue;
+
+      // Process the top of the stack.
+      Node* node = current.node;
+      typing_stack_.pop();
+      NodeInfo* info = GetInfo(node);
+      info->set_visited();
+      bool updated = UpdateFeedbackType(node);
+      if (updated) {
+        for (Node* const user : node->uses()) {
+          if (GetInfo(user)->visited()) {
+            GetInfo(user)->set_queued();
+            queue_.push(user);
+          }
+        }
+      }
+    }
+
+    // Process the revisit queue.
+    while (!queue_.empty()) {
+      Node* node = queue_.front();
+      queue_.pop();
+      NodeInfo* info = GetInfo(node);
+      info->set_visited();
+      bool updated = UpdateFeedbackType(node);
+      if (updated) {
+        for (Node* const user : node->uses()) {
+          if (GetInfo(user)->visited()) {
+            GetInfo(user)->set_queued();
+            queue_.push(user);
+          }
+        }
+      }
+    }
+  }
+
+  void ResetNodeInfoState() {
+    // Clean up for the next phase.
+    for (NodeInfo& info : info_) {
+      info.reset_state();
+    }
+  }
+
+  Type* TypeOf(Node* node) {
+    Type* type = GetInfo(node)->feedback_type();
+    return type == nullptr ? NodeProperties::GetType(node) : type;
+  }
+
+  Type* FeedbackTypeOf(Node* node) {
+    Type* type = GetInfo(node)->feedback_type();
+    return type == nullptr ? Type::None() : type;
+  }
+
+  Type* TypePhi(Node* node) {
+    int arity = node->op()->ValueInputCount();
+    Type* type = FeedbackTypeOf(node->InputAt(0));
+    for (int i = 1; i < arity; ++i) {
+      type = op_typer_.Merge(type, FeedbackTypeOf(node->InputAt(i)));
+    }
+    return type;
+  }
+
+  Type* TypeSelect(Node* node) {
+    return op_typer_.Merge(FeedbackTypeOf(node->InputAt(1)),
+                           FeedbackTypeOf(node->InputAt(2)));
+  }
+
+  static Type* TypeOfSpeculativeOp(TypeCheckKind type_check) {
+    switch (type_check) {
+      case TypeCheckKind::kNone:
+        return Type::Any();
+      case TypeCheckKind::kSigned32:
+        return Type::Signed32();
+      case TypeCheckKind::kNumber:
+        return Type::Number();
+      // Unexpected cases.
+      case TypeCheckKind::kNumberOrUndefined:
+        FATAL("Unexpected checked type.");
+        break;
+    }
+    UNREACHABLE();
+    return nullptr;
+  }
+
+  bool UpdateFeedbackType(Node* node) {
+    if (node->op()->ValueOutputCount() == 0) return false;
+
+    NodeInfo* info = GetInfo(node);
+    Type* type = info->feedback_type();
+    Type* new_type = type;
+
+    switch (node->opcode()) {
+      case IrOpcode::kSpeculativeNumberAdd: {
+        Type* lhs = FeedbackTypeOf(node->InputAt(0));
+        Type* rhs = FeedbackTypeOf(node->InputAt(1));
+        if (lhs->Is(Type::None()) || rhs->Is(Type::None())) return false;
+        // TODO(jarin) The ToNumber conversion is too conservative here,
+        // e.g. it will treat true as 1 even though the number check will
+        // fail on a boolean. OperationTyper should have a function that
+        // computes a more precise type.
+        lhs = op_typer_.ToNumber(lhs);
+        rhs = op_typer_.ToNumber(rhs);
+        Type* static_type = op_typer_.NumericAdd(lhs, rhs);
+        if (info->type_check() == TypeCheckKind::kNone) {
+          new_type = static_type;
+        } else {
+          Type* feedback_type = TypeOfSpeculativeOp(info->type_check());
+          new_type = Type::Intersect(static_type, feedback_type, graph_zone());
+        }
+        break;
+      }
+
+      case IrOpcode::kSpeculativeNumberSubtract: {
+        Type* lhs = FeedbackTypeOf(node->InputAt(0));
+        Type* rhs = FeedbackTypeOf(node->InputAt(1));
+        if (lhs->Is(Type::None()) || rhs->Is(Type::None())) return false;
+        // TODO(jarin) The ToNumber conversion is too conservative here,
+        // e.g. it will treat true as 1 even though the number check will
+        // fail on a boolean. OperationTyper should have a function that
+        // computes a more precise type.
+        lhs = op_typer_.ToNumber(lhs);
+        rhs = op_typer_.ToNumber(rhs);
+        Type* static_type = op_typer_.NumericSubtract(lhs, rhs);
+        if (info->type_check() == TypeCheckKind::kNone) {
+          new_type = static_type;
+        } else {
+          Type* feedback_type = TypeOfSpeculativeOp(info->type_check());
+          new_type = Type::Intersect(static_type, feedback_type, graph_zone());
+        }
+        break;
+      }
+
+      case IrOpcode::kSpeculativeNumberMultiply: {
+        Type* lhs = FeedbackTypeOf(node->InputAt(0));
+        Type* rhs = FeedbackTypeOf(node->InputAt(1));
+        if (lhs->Is(Type::None()) || rhs->Is(Type::None())) return false;
+        // TODO(jarin) The ToNumber conversion is too conservative here,
+        // e.g. it will treat true as 1 even though the number check will
+        // fail on a boolean. OperationTyper should have a function that
+        // computes a more precise type.
+        lhs = op_typer_.ToNumber(lhs);
+        rhs = op_typer_.ToNumber(rhs);
+        Type* static_type = op_typer_.NumericMultiply(lhs, rhs);
+        if (info->type_check() == TypeCheckKind::kNone) {
+          new_type = static_type;
+        } else {
+          Type* feedback_type = TypeOfSpeculativeOp(info->type_check());
+          new_type = Type::Intersect(static_type, feedback_type, graph_zone());
+        }
+        break;
+      }
+
+      case IrOpcode::kSpeculativeNumberDivide: {
+        Type* lhs = FeedbackTypeOf(node->InputAt(0));
+        Type* rhs = FeedbackTypeOf(node->InputAt(1));
+        if (lhs->Is(Type::None()) || rhs->Is(Type::None())) return false;
+        // TODO(jarin) The ToNumber conversion is too conservative here,
+        // e.g. it will treat true as 1 even though the number check will
+        // fail on a boolean. OperationTyper should have a function that
+        // computes a more precise type.
+        lhs = op_typer_.ToNumber(lhs);
+        rhs = op_typer_.ToNumber(rhs);
+        Type* static_type = op_typer_.NumericDivide(lhs, rhs);
+        if (info->type_check() == TypeCheckKind::kNone) {
+          new_type = static_type;
+        } else {
+          Type* feedback_type = TypeOfSpeculativeOp(info->type_check());
+          new_type = Type::Intersect(static_type, feedback_type, graph_zone());
+        }
+        break;
+      }
+
+      case IrOpcode::kSpeculativeNumberModulus: {
+        Type* lhs = FeedbackTypeOf(node->InputAt(0));
+        Type* rhs = FeedbackTypeOf(node->InputAt(1));
+        if (lhs->Is(Type::None()) || rhs->Is(Type::None())) return false;
+        // TODO(jarin) The ToNumber conversion is too conservative here,
+        // e.g. it will treat true as 1 even though the number check will
+        // fail on a boolean. OperationTyper should have a function that
+        // computes a more precise type.
+        lhs = op_typer_.ToNumber(lhs);
+        rhs = op_typer_.ToNumber(rhs);
+        Type* static_type = op_typer_.NumericModulus(lhs, rhs);
+        if (info->type_check() == TypeCheckKind::kNone) {
+          new_type = static_type;
+        } else {
+          Type* feedback_type = TypeOfSpeculativeOp(info->type_check());
+          new_type = Type::Intersect(static_type, feedback_type, graph_zone());
+        }
+        break;
+      }
+
+      case IrOpcode::kPhi: {
+        new_type = TypePhi(node);
+        if (type != nullptr) {
+          new_type = Weaken(node, type, new_type);
+        }
+        // Recompute the phi representation based on the new type.
+        MachineRepresentation output =
+            GetOutputInfoForPhi(node, GetInfo(node)->truncation(), new_type);
+        ResetOutput(node, output);
+        break;
+      }
+
+      case IrOpcode::kSelect: {
+        new_type = TypeSelect(node);
+        // Recompute representation based on the new type.
+        MachineRepresentation output =
+            GetOutputInfoForPhi(node, GetInfo(node)->truncation(), new_type);
+        ResetOutput(node, output);
+        break;
+      }
+
+      default:
+        // Shortcut for operations that we do not handle.
+        if (type == nullptr) {
+          GetInfo(node)->set_feedback_type(NodeProperties::GetType(node));
+          return true;
+        }
+        return false;
+    }
+    if (type != nullptr && new_type->Is(type)) return false;
+    GetInfo(node)->set_feedback_type(new_type);
+    if (FLAG_trace_representation) {
+      PrintNodeFeedbackType(node);
+    }
+    return true;
+  }
+
+  void PrintNodeFeedbackType(Node* n) {
+    OFStream os(stdout);
+    os << "#" << n->id() << ":" << *n->op() << "(";
+    int j = 0;
+    for (Node* const i : n->inputs()) {
+      if (j++ > 0) os << ", ";
+      os << "#" << i->id() << ":" << i->op()->mnemonic();
+    }
+    os << ")";
+    if (NodeProperties::IsTyped(n)) {
+      os << "  [Static type: ";
+      Type* static_type = NodeProperties::GetType(n);
+      static_type->PrintTo(os);
+      Type* feedback_type = GetInfo(n)->feedback_type();
+      if (feedback_type != nullptr && feedback_type != static_type) {
+        os << ", Feedback type: ";
+        feedback_type->PrintTo(os);
+      }
+      os << "]";
+    }
+    os << std::endl;
+  }
+
+  Type* Weaken(Node* node, Type* previous_type, Type* current_type) {
+    // If the types have nothing to do with integers, return the types.
+    Type* const integer = type_cache_.kInteger;
+    if (!previous_type->Maybe(integer)) {
+      return current_type;
+    }
+    DCHECK(current_type->Maybe(integer));
+
+    Type* current_integer =
+        Type::Intersect(current_type, integer, graph_zone());
+    Type* previous_integer =
+        Type::Intersect(previous_type, integer, graph_zone());
+
+    // Once we start weakening a node, we should always weaken.
+    if (!GetInfo(node)->weakened()) {
+      // Only weaken if there is range involved; we should converge quickly
+      // for all other types (the exception is a union of many constants,
+      // but we currently do not increase the number of constants in unions).
+      Type* previous = previous_integer->GetRange();
+      Type* current = current_integer->GetRange();
+      if (current == nullptr || previous == nullptr) {
+        return current_type;
+      }
+      // Range is involved => we are weakening.
+      GetInfo(node)->set_weakened();
+    }
+
+    return Type::Union(current_type,
+                       op_typer_.WeakenRange(previous_integer, current_integer),
+                       graph_zone());
+  }
+
+  // Backward propagation of truncations.
+  void RunTruncationPropagationPhase() {
     // Run propagation phase to a fixpoint.
     TRACE("--{Propagation phase}--\n");
     phase_ = PROPAGATE;
@@ -291,13 +561,22 @@
       Node* node = queue_.front();
       NodeInfo* info = GetInfo(node);
       queue_.pop();
-      info->set_queued(false);
+      info->set_visited();
       TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
       VisitNode(node, info->truncation(), nullptr);
       TRACE("  ==> output ");
       PrintOutputInfo(info);
       TRACE("\n");
     }
+  }
+
+  void Run(SimplifiedLowering* lowering) {
+    RunTruncationPropagationPhase();
+
+    if (lowering->flags() & SimplifiedLowering::kTypeFeedbackEnabled) {
+      ResetNodeInfoState();
+      RunTypePropagationPhase();
+    }
 
     // Run lowering and change insertion phase.
     TRACE("--{Simplified lowering phase}--\n");
@@ -319,6 +598,7 @@
       Node* node = *i;
       Node* replacement = *(++i);
       node->ReplaceUses(replacement);
+      node->Kill();
       // We also need to replace the node in the rest of the vector.
       for (NodeVector::iterator j = i + 1; j != replacements_.end(); ++j) {
         ++j;
@@ -329,8 +609,7 @@
 
   void EnqueueInitial(Node* node) {
     NodeInfo* info = GetInfo(node);
-    info->set_visited();
-    info->set_queued(true);
+    info->set_queued();
     nodes_.push_back(node);
     queue_.push(node);
   }
@@ -348,10 +627,9 @@
     node_input_use_infos_[use_node->id()].SetAndCheckInput(use_node, index,
                                                            use_info);
 #endif  // DEBUG
-    if (!info->visited()) {
+    if (info->unvisited()) {
       // First visit of this node.
-      info->set_visited();
-      info->set_queued(true);
+      info->set_queued();
       nodes_.push_back(node);
       queue_.push(node);
       TRACE("  initial: ");
@@ -365,7 +643,7 @@
       // New usage information for the node is available.
       if (!info->queued()) {
         queue_.push(node);
-        info->set_queued(true);
+        info->set_queued();
         TRACE("   added: ");
       } else {
         TRACE(" inqueue: ");
@@ -375,48 +653,39 @@
   }
 
   bool lower() { return phase_ == LOWER; }
+  bool propagate() { return phase_ == PROPAGATE; }
 
-  void EnqueueUses(Node* node) {
-    for (Edge edge : node->use_edges()) {
-      if (NodeProperties::IsValueEdge(edge)) {
-        Node* const user = edge.from();
-        if (user->id() < count_) {
-          // New type information for the node is available.
-          NodeInfo* info = GetInfo(user);
-          // Enqueue the node only if we are sure it is reachable from
-          // the end and it has not been queued yet.
-          if (info->visited() && !info->queued()) {
-            queue_.push(user);
-            info->set_queued(true);
-          }
-        }
-      }
-    }
+  void SetOutput(Node* node, MachineRepresentation representation,
+                 TypeCheckKind type_check = TypeCheckKind::kNone) {
+    DCHECK(MachineRepresentationIsSubtype(GetInfo(node)->representation(),
+                                          representation));
+    ResetOutput(node, representation, type_check);
   }
 
-  void SetOutput(Node* node, MachineRepresentation representation) {
+  void ResetOutput(Node* node, MachineRepresentation representation,
+                   TypeCheckKind type_check = TypeCheckKind::kNone) {
     NodeInfo* info = GetInfo(node);
-    DCHECK(
-        MachineRepresentationIsSubtype(info->representation(), representation));
     info->set_output(representation);
+    info->set_type_check(type_check);
   }
 
   Type* GetUpperBound(Node* node) { return NodeProperties::GetType(node); }
 
+  bool InputIs(Node* node, Type* type) {
+    DCHECK_EQ(1, node->op()->ValueInputCount());
+    return GetUpperBound(node->InputAt(0))->Is(type);
+  }
+
   bool BothInputsAreSigned32(Node* node) {
-    DCHECK_EQ(2, node->InputCount());
-    return GetUpperBound(node->InputAt(0))->Is(Type::Signed32()) &&
-           GetUpperBound(node->InputAt(1))->Is(Type::Signed32());
+    return BothInputsAre(node, Type::Signed32());
   }
 
   bool BothInputsAreUnsigned32(Node* node) {
-    DCHECK_EQ(2, node->InputCount());
-    return GetUpperBound(node->InputAt(0))->Is(Type::Unsigned32()) &&
-           GetUpperBound(node->InputAt(1))->Is(Type::Unsigned32());
+    return BothInputsAre(node, Type::Unsigned32());
   }
 
   bool BothInputsAre(Node* node, Type* type) {
-    DCHECK_EQ(2, node->InputCount());
+    DCHECK_EQ(2, node->op()->ValueInputCount());
     return GetUpperBound(node->InputAt(0))->Is(type) &&
            GetUpperBound(node->InputAt(1))->Is(type);
   }
@@ -424,11 +693,13 @@
   void ConvertInput(Node* node, int index, UseInfo use) {
     Node* input = node->InputAt(index);
     // In the change phase, insert a change before the use if necessary.
-    if (use.preferred() == MachineRepresentation::kNone)
+    if (use.representation() == MachineRepresentation::kNone)
       return;  // No input requirement on the use.
+    DCHECK_NOT_NULL(input);
     NodeInfo* input_info = GetInfo(input);
     MachineRepresentation input_rep = input_info->representation();
-    if (input_rep != use.preferred()) {
+    if (input_rep != use.representation() ||
+        use.type_check() != TypeCheckKind::kNone) {
       // Output representation doesn't match usage.
       TRACE("  change: #%d:%s(@%d #%d:%s) ", node->id(), node->op()->mnemonic(),
             index, input->id(), input->op()->mnemonic());
@@ -438,8 +709,7 @@
       PrintUseInfo(use);
       TRACE("\n");
       Node* n = changer_->GetRepresentationFor(
-          input, input_info->representation(), GetUpperBound(input),
-          use.preferred(), use.truncation());
+          input, input_info->representation(), TypeOf(input), node, use);
       node->ReplaceInput(index, n);
     }
   }
@@ -484,25 +754,28 @@
 
   // Helper for binops of the R x L -> O variety.
   void VisitBinop(Node* node, UseInfo left_use, UseInfo right_use,
-                  MachineRepresentation output) {
+                  MachineRepresentation output,
+                  TypeCheckKind type_check = TypeCheckKind::kNone) {
     DCHECK_EQ(2, node->op()->ValueInputCount());
     ProcessInput(node, 0, left_use);
     ProcessInput(node, 1, right_use);
     for (int i = 2; i < node->InputCount(); i++) {
       EnqueueInput(node, i);
     }
-    SetOutput(node, output);
+    SetOutput(node, output, type_check);
   }
 
   // Helper for binops of the I x I -> O variety.
-  void VisitBinop(Node* node, UseInfo input_use, MachineRepresentation output) {
-    VisitBinop(node, input_use, input_use, output);
+  void VisitBinop(Node* node, UseInfo input_use, MachineRepresentation output,
+                  TypeCheckKind type_check = TypeCheckKind::kNone) {
+    VisitBinop(node, input_use, input_use, output, type_check);
   }
 
   // Helper for unops of the I -> O variety.
   void VisitUnop(Node* node, UseInfo input_use, MachineRepresentation output) {
-    DCHECK_EQ(1, node->InputCount());
+    DCHECK_EQ(1, node->op()->ValueInputCount());
     ProcessInput(node, 0, input_use);
+    ProcessRemainingInputs(node, 1);
     SetOutput(node, output);
   }
 
@@ -554,9 +827,12 @@
   }
 
   // Infer representation for phi-like nodes.
-  MachineRepresentation GetOutputInfoForPhi(Node* node, Truncation use) {
+  MachineRepresentation GetOutputInfoForPhi(Node* node, Truncation use,
+                                            Type* type = nullptr) {
     // Compute the representation.
-    Type* type = GetUpperBound(node);
+    if (type == nullptr) {
+      type = TypeOf(node);
+    }
     if (type->Is(Type::None())) {
       return MachineRepresentation::kNone;
     } else if (type->Is(Type::Signed32()) || type->Is(Type::Unsigned32())) {
@@ -579,6 +855,7 @@
                        MachineRepresentation::kWord64;
 #ifdef DEBUG
       // Check that all the inputs agree on being Word64.
+      DCHECK_EQ(IrOpcode::kPhi, node->opcode());  // This only works for phis.
       for (int i = 1; i < node->op()->ValueInputCount(); i++) {
         DCHECK_EQ(is_word64, GetInfo(node->InputAt(i))->representation() ==
                                  MachineRepresentation::kWord64);
@@ -617,6 +894,8 @@
   void VisitPhi(Node* node, Truncation truncation,
                 SimplifiedLowering* lowering) {
     MachineRepresentation output = GetOutputInfoForPhi(node, truncation);
+    // Only set the output representation if not running with type
+    // feedback. (Feedback typing will set the representation.)
     SetOutput(node, output);
 
     int values = node->op()->ValueInputCount();
@@ -686,7 +965,7 @@
         Node* input = node->InputAt(i);
         NodeInfo* input_info = GetInfo(input);
         MachineType machine_type(input_info->representation(),
-                                 DeoptValueSemanticOf(GetUpperBound(input)));
+                                 DeoptValueSemanticOf(TypeOf(input)));
         DCHECK(machine_type.representation() !=
                    MachineRepresentation::kWord32 ||
                machine_type.semantic() == MachineSemantic::kInt32 ||
@@ -703,6 +982,10 @@
     return changer_->Int32OperatorFor(node->opcode());
   }
 
+  const Operator* Int32OverflowOp(Node* node) {
+    return changer_->Int32OverflowOperatorFor(node->opcode());
+  }
+
   const Operator* Uint32Op(Node* node) {
     return changer_->Uint32OperatorFor(node->opcode());
   }
@@ -776,6 +1059,102 @@
                                field_type, value);
   }
 
+  Graph* graph() const { return jsgraph_->graph(); }
+  CommonOperatorBuilder* common() const { return jsgraph_->common(); }
+  SimplifiedOperatorBuilder* simplified() const {
+    return jsgraph_->simplified();
+  }
+
+  void ReplaceEffectControlUses(Node* node, Node* effect, Node* control) {
+    for (Edge edge : node->use_edges()) {
+      if (NodeProperties::IsControlEdge(edge)) {
+        edge.UpdateTo(control);
+      } else if (NodeProperties::IsEffectEdge(edge)) {
+        edge.UpdateTo(effect);
+      } else {
+        DCHECK(NodeProperties::IsValueEdge(edge));
+      }
+    }
+  }
+
+  void ChangeToPureOp(Node* node, const Operator* new_op) {
+    if (node->op()->EffectInputCount() > 0) {
+      DCHECK_LT(0, node->op()->ControlInputCount());
+      // Disconnect the node from effect and control chains.
+      Node* control = NodeProperties::GetControlInput(node);
+      Node* effect = NodeProperties::GetEffectInput(node);
+      ReplaceEffectControlUses(node, effect, control);
+      node->TrimInputCount(new_op->ValueInputCount());
+    } else {
+      DCHECK_EQ(0, node->op()->ControlInputCount());
+    }
+
+    NodeProperties::ChangeOp(node, new_op);
+  }
+
+  void ChangeToInt32OverflowOp(Node* node, const Operator* new_op) {
+    NodeProperties::ChangeOp(node, new_op);
+  }
+
+  void VisitSpeculativeAdditiveOp(Node* node, Truncation truncation,
+                                  SimplifiedLowering* lowering) {
+    if (BothInputsAre(node, type_cache_.kSigned32OrMinusZero) &&
+        NodeProperties::GetType(node)->Is(Type::Signed32())) {
+      // int32 + int32 = int32   ==>   signed Int32Add/Sub
+      VisitInt32Binop(node);
+      if (lower()) ChangeToPureOp(node, Int32Op(node));
+      return;
+    }
+
+    // Use truncation if available.
+    if (BothInputsAre(node, type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
+        truncation.TruncatesToWord32()) {
+      // safe-int + safe-int = x (truncated to int32)
+      // => signed Int32Add/Sub (truncated)
+      VisitWord32TruncatingBinop(node);
+      if (lower()) ChangeToPureOp(node, Int32Op(node));
+      return;
+    }
+
+    // Try to use type feedback.
+    BinaryOperationHints::Hint hint = BinaryOperationHintOf(node->op());
+
+    // Handle the case when no int32 checks on inputs are necessary
+    // (but an overflow check is needed on the output).
+    if (BothInputsAre(node, Type::Signed32()) ||
+        (BothInputsAre(node, type_cache_.kSigned32OrMinusZero) &&
+         NodeProperties::GetType(node)->Is(type_cache_.kSafeInteger))) {
+      // If both the inputs the feedback are int32, use the overflow op.
+      if (hint == BinaryOperationHints::kSignedSmall ||
+          hint == BinaryOperationHints::kSigned32) {
+        VisitBinop(node, UseInfo::TruncatingWord32(),
+                   MachineRepresentation::kWord32, TypeCheckKind::kSigned32);
+        if (lower()) {
+          ChangeToInt32OverflowOp(node, Int32OverflowOp(node));
+        }
+        return;
+      }
+    }
+
+    if (hint == BinaryOperationHints::kSignedSmall ||
+        hint == BinaryOperationHints::kSigned32) {
+      VisitBinop(node, UseInfo::CheckedSigned32AsWord32(),
+                 MachineRepresentation::kWord32, TypeCheckKind::kSigned32);
+      if (lower()) {
+        ChangeToInt32OverflowOp(node, Int32OverflowOp(node));
+      }
+      return;
+    }
+
+    // default case => Float64Add/Sub
+    VisitBinop(node, UseInfo::CheckedNumberOrUndefinedAsFloat64(),
+               MachineRepresentation::kFloat64, TypeCheckKind::kNumber);
+    if (lower()) {
+      ChangeToPureOp(node, Float64Op(node));
+    }
+    return;
+  }
+
   // Dispatching routine for visiting the node {node} with the usage {use}.
   // Depending on the operator, propagate new usage info to the inputs.
   void VisitNode(Node* node, Truncation truncation,
@@ -813,15 +1192,15 @@
         ProcessInput(node, 0, UseInfo::Bool());
         ProcessInput(node, 1, UseInfo::AnyTagged());
         ProcessRemainingInputs(node, 2);
-        break;
+        return;
       case IrOpcode::kBranch:
         ProcessInput(node, 0, UseInfo::Bool());
         EnqueueInput(node, NodeProperties::FirstControlIndex(node));
-        break;
+        return;
       case IrOpcode::kSwitch:
         ProcessInput(node, 0, UseInfo::TruncatingWord32());
         EnqueueInput(node, NodeProperties::FirstControlIndex(node));
-        break;
+        return;
       case IrOpcode::kSelect:
         return VisitSelect(node, truncation, lowering);
       case IrOpcode::kPhi:
@@ -844,7 +1223,7 @@
         } else {
           SetOutput(node, MachineRepresentation::kTagged);
         }
-        break;
+        return;
       }
 
       //------------------------------------------------------------------
@@ -867,7 +1246,7 @@
           ProcessInput(node, 0, UseInfo::AnyTruncatingToBool());
           SetOutput(node, MachineRepresentation::kBit);
         }
-        break;
+        return;
       }
       case IrOpcode::kBooleanToNumber: {
         if (lower()) {
@@ -885,17 +1264,19 @@
           ProcessInput(node, 0, UseInfo::AnyTruncatingToBool());
           SetOutput(node, MachineRepresentation::kWord32);
         }
-        break;
+        return;
       }
       case IrOpcode::kNumberEqual:
       case IrOpcode::kNumberLessThan:
       case IrOpcode::kNumberLessThanOrEqual: {
         // Number comparisons reduce to integer comparisons for integer inputs.
-        if (BothInputsAreSigned32(node)) {
+        if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
+            TypeOf(node->InputAt(1))->Is(Type::Signed32())) {
           // => signed Int32Cmp
           VisitInt32Cmp(node);
           if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
-        } else if (BothInputsAreUnsigned32(node)) {
+        } else if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
+                   TypeOf(node->InputAt(1))->Is(Type::Unsigned32())) {
           // => unsigned Int32Cmp
           VisitUint32Cmp(node);
           if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
@@ -904,8 +1285,47 @@
           VisitFloat64Cmp(node);
           if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
         }
-        break;
+        return;
       }
+
+      case IrOpcode::kSpeculativeNumberAdd:
+      case IrOpcode::kSpeculativeNumberSubtract:
+        return VisitSpeculativeAdditiveOp(node, truncation, lowering);
+
+      case IrOpcode::kSpeculativeNumberLessThan:
+      case IrOpcode::kSpeculativeNumberLessThanOrEqual:
+      case IrOpcode::kSpeculativeNumberEqual: {
+        // Number comparisons reduce to integer comparisons for integer inputs.
+        if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
+            TypeOf(node->InputAt(1))->Is(Type::Signed32())) {
+          // => signed Int32Cmp
+          VisitInt32Cmp(node);
+          if (lower()) ChangeToPureOp(node, Int32Op(node));
+          return;
+        } else if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
+                   TypeOf(node->InputAt(1))->Is(Type::Unsigned32())) {
+          // => unsigned Int32Cmp
+          VisitUint32Cmp(node);
+          if (lower()) ChangeToPureOp(node, Uint32Op(node));
+          return;
+        }
+        // Try to use type feedback.
+        CompareOperationHints::Hint hint = CompareOperationHintOf(node->op());
+
+        if (hint == CompareOperationHints::kSignedSmall) {
+          VisitBinop(node, UseInfo::CheckedSigned32AsWord32(),
+                     MachineRepresentation::kBit);
+          if (lower()) ChangeToPureOp(node, Int32Op(node));
+          return;
+        }
+        DCHECK_EQ(CompareOperationHints::kNumber, hint);
+        // default case => Float64 comparison
+        VisitBinop(node, UseInfo::CheckedNumberOrUndefinedAsFloat64(),
+                   MachineRepresentation::kBit);
+        if (lower()) ChangeToPureOp(node, Float64Op(node));
+        return;
+      }
+
       case IrOpcode::kNumberAdd:
       case IrOpcode::kNumberSubtract: {
         if (BothInputsAre(node, Type::Signed32()) &&
@@ -914,7 +1334,8 @@
           // => signed Int32Add/Sub
           VisitInt32Binop(node);
           if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
-        } else if (BothInputsAre(node, type_cache_.kAdditiveSafeInteger) &&
+        } else if (BothInputsAre(node,
+                                 type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
                    truncation.TruncatesToWord32()) {
           // safe-int + safe-int = x (truncated to int32)
           // => signed Int32Add/Sub (truncated)
@@ -925,90 +1346,119 @@
           VisitFloat64Binop(node);
           if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
         }
-        break;
+        return;
       }
+      case IrOpcode::kSpeculativeNumberMultiply:
       case IrOpcode::kNumberMultiply: {
         if (BothInputsAreSigned32(node)) {
           if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
             // Multiply reduces to Int32Mul if the inputs and the output
             // are integers.
             VisitInt32Binop(node);
-            if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
-            break;
+            if (lower()) ChangeToPureOp(node, Int32Op(node));
+            return;
           }
           if (truncation.TruncatesToWord32() &&
-              NodeProperties::GetType(node)->Is(type_cache_.kSafeInteger)) {
+              NodeProperties::GetType(node)->Is(
+                  type_cache_.kSafeIntegerOrMinusZero)) {
             // Multiply reduces to Int32Mul if the inputs are integers,
             // the uses are truncating and the result is in the safe
             // integer range.
             VisitWord32TruncatingBinop(node);
-            if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
-            break;
+            if (lower()) ChangeToPureOp(node, Int32Op(node));
+            return;
           }
         }
-        // => Float64Mul
-        VisitFloat64Binop(node);
-        if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
-        break;
+        // Number x Number => Float64Mul
+        if (BothInputsAre(node, Type::NumberOrUndefined())) {
+          VisitFloat64Binop(node);
+          if (lower()) ChangeToPureOp(node, Float64Op(node));
+          return;
+        }
+        // Checked float64 x float64 => float64
+        DCHECK_EQ(IrOpcode::kSpeculativeNumberMultiply, node->opcode());
+        VisitBinop(node, UseInfo::CheckedNumberOrUndefinedAsFloat64(),
+                   MachineRepresentation::kFloat64, TypeCheckKind::kNumber);
+        if (lower()) ChangeToPureOp(node, Float64Op(node));
+        return;
       }
+      case IrOpcode::kSpeculativeNumberDivide:
       case IrOpcode::kNumberDivide: {
         if (BothInputsAreSigned32(node)) {
           if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
           // => signed Int32Div
           VisitInt32Binop(node);
           if (lower()) DeferReplacement(node, lowering->Int32Div(node));
-          break;
+          return;
           }
           if (truncation.TruncatesToWord32()) {
             // => signed Int32Div
             VisitWord32TruncatingBinop(node);
             if (lower()) DeferReplacement(node, lowering->Int32Div(node));
-            break;
+            return;
           }
         }
         if (BothInputsAreUnsigned32(node) && truncation.TruncatesToWord32()) {
           // => unsigned Uint32Div
           VisitWord32TruncatingBinop(node);
           if (lower()) DeferReplacement(node, lowering->Uint32Div(node));
-          break;
+          return;
         }
-        // => Float64Div
-        VisitFloat64Binop(node);
-        if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
-        break;
+        // Number x Number => Float64Div
+        if (BothInputsAre(node, Type::NumberOrUndefined())) {
+          VisitFloat64Binop(node);
+          if (lower()) ChangeToPureOp(node, Float64Op(node));
+          return;
+        }
+        // Checked float64 x float64 => float64
+        DCHECK_EQ(IrOpcode::kSpeculativeNumberDivide, node->opcode());
+        VisitBinop(node, UseInfo::CheckedNumberOrUndefinedAsFloat64(),
+                   MachineRepresentation::kFloat64, TypeCheckKind::kNumber);
+        if (lower()) ChangeToPureOp(node, Float64Op(node));
+        return;
       }
+      case IrOpcode::kSpeculativeNumberModulus:
       case IrOpcode::kNumberModulus: {
         if (BothInputsAreSigned32(node)) {
           if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
             // => signed Int32Mod
             VisitInt32Binop(node);
             if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
-            break;
+            return;
           }
           if (truncation.TruncatesToWord32()) {
             // => signed Int32Mod
             VisitWord32TruncatingBinop(node);
             if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
-            break;
+            return;
           }
         }
         if (BothInputsAreUnsigned32(node) && truncation.TruncatesToWord32()) {
           // => unsigned Uint32Mod
           VisitWord32TruncatingBinop(node);
           if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
-          break;
+          return;
         }
-        // => Float64Mod
-        VisitFloat64Binop(node);
-        if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
-        break;
+        // Number x Number => Float64Mod
+        if (BothInputsAre(node, Type::NumberOrUndefined())) {
+          // => Float64Mod
+          VisitFloat64Binop(node);
+          if (lower()) ChangeToPureOp(node, Float64Op(node));
+          return;
+        }
+        // Checked float64 x float64 => float64
+        DCHECK_EQ(IrOpcode::kSpeculativeNumberModulus, node->opcode());
+        VisitBinop(node, UseInfo::CheckedNumberOrUndefinedAsFloat64(),
+                   MachineRepresentation::kFloat64, TypeCheckKind::kNumber);
+        if (lower()) ChangeToPureOp(node, Float64Op(node));
+        return;
       }
       case IrOpcode::kNumberBitwiseOr:
       case IrOpcode::kNumberBitwiseXor:
       case IrOpcode::kNumberBitwiseAnd: {
         VisitInt32Binop(node);
         if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
-        break;
+        return;
       }
       case IrOpcode::kNumberShiftLeft: {
         Type* rhs_type = GetUpperBound(node->InputAt(1));
@@ -1017,7 +1467,7 @@
         if (lower()) {
           lowering->DoShift(node, lowering->machine()->Word32Shl(), rhs_type);
         }
-        break;
+        return;
       }
       case IrOpcode::kNumberShiftRight: {
         Type* rhs_type = GetUpperBound(node->InputAt(1));
@@ -1026,7 +1476,7 @@
         if (lower()) {
           lowering->DoShift(node, lowering->machine()->Word32Sar(), rhs_type);
         }
-        break;
+        return;
       }
       case IrOpcode::kNumberShiftRightLogical: {
         Type* rhs_type = GetUpperBound(node->InputAt(1));
@@ -1035,87 +1485,127 @@
         if (lower()) {
           lowering->DoShift(node, lowering->machine()->Word32Shr(), rhs_type);
         }
-        break;
+        return;
+      }
+      case IrOpcode::kNumberAbs: {
+        if (InputIs(node, Type::Unsigned32())) {
+          VisitUnop(node, UseInfo::TruncatingWord32(),
+                    MachineRepresentation::kWord32);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else if (InputIs(node, type_cache_.kSafeSigned32)) {
+          VisitUnop(node, UseInfo::TruncatingWord32(),
+                    MachineRepresentation::kWord32);
+          if (lower()) DeferReplacement(node, lowering->Int32Abs(node));
+        } else if (InputIs(node,
+                           type_cache_.kPositiveIntegerOrMinusZeroOrNaN)) {
+          VisitUnop(node, UseInfo::TruncatingFloat64(),
+                    MachineRepresentation::kFloat64);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else {
+          VisitUnop(node, UseInfo::TruncatingFloat64(),
+                    MachineRepresentation::kFloat64);
+          if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+        }
+        return;
       }
       case IrOpcode::kNumberClz32: {
         VisitUnop(node, UseInfo::TruncatingWord32(),
                   MachineRepresentation::kWord32);
         if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
-        break;
+        return;
       }
       case IrOpcode::kNumberImul: {
         VisitBinop(node, UseInfo::TruncatingWord32(),
                    UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
         if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
-        break;
+        return;
       }
       case IrOpcode::kNumberCeil: {
         VisitUnop(node, UseInfo::TruncatingFloat64(),
                   MachineRepresentation::kFloat64);
         if (lower()) DeferReplacement(node, lowering->Float64Ceil(node));
-        break;
+        return;
       }
       case IrOpcode::kNumberFloor: {
         VisitUnop(node, UseInfo::TruncatingFloat64(),
                   MachineRepresentation::kFloat64);
         if (lower()) DeferReplacement(node, lowering->Float64Floor(node));
-        break;
+        return;
+      }
+      case IrOpcode::kNumberFround: {
+        VisitUnop(node, UseInfo::TruncatingFloat64(),
+                  MachineRepresentation::kFloat32);
+        if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+        return;
+      }
+      case IrOpcode::kNumberAtan2: {
+        VisitBinop(node, UseInfo::TruncatingFloat64(),
+                   MachineRepresentation::kFloat64);
+        if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+        return;
+      }
+      case IrOpcode::kNumberAtan:
+      case IrOpcode::kNumberAtanh:
+      case IrOpcode::kNumberCos:
+      case IrOpcode::kNumberExp:
+      case IrOpcode::kNumberExpm1:
+      case IrOpcode::kNumberLog:
+      case IrOpcode::kNumberLog1p:
+      case IrOpcode::kNumberLog2:
+      case IrOpcode::kNumberLog10:
+      case IrOpcode::kNumberCbrt:
+      case IrOpcode::kNumberSin:
+      case IrOpcode::kNumberTan: {
+        VisitUnop(node, UseInfo::TruncatingFloat64(),
+                  MachineRepresentation::kFloat64);
+        if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+        return;
       }
       case IrOpcode::kNumberRound: {
         VisitUnop(node, UseInfo::TruncatingFloat64(),
                   MachineRepresentation::kFloat64);
         if (lower()) DeferReplacement(node, lowering->Float64Round(node));
-        break;
+        return;
+      }
+      case IrOpcode::kNumberSqrt: {
+        VisitUnop(node, UseInfo::TruncatingFloat64(),
+                  MachineRepresentation::kFloat64);
+        if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+        return;
       }
       case IrOpcode::kNumberTrunc: {
         VisitUnop(node, UseInfo::TruncatingFloat64(),
                   MachineRepresentation::kFloat64);
         if (lower()) DeferReplacement(node, lowering->Float64Trunc(node));
-        break;
+        return;
       }
       case IrOpcode::kNumberToInt32: {
         // Just change representation if necessary.
         VisitUnop(node, UseInfo::TruncatingWord32(),
                   MachineRepresentation::kWord32);
         if (lower()) DeferReplacement(node, node->InputAt(0));
-        break;
+        return;
       }
       case IrOpcode::kNumberToUint32: {
         // Just change representation if necessary.
         VisitUnop(node, UseInfo::TruncatingWord32(),
                   MachineRepresentation::kWord32);
         if (lower()) DeferReplacement(node, node->InputAt(0));
-        break;
-      }
-      case IrOpcode::kNumberIsHoleNaN: {
-        VisitUnop(node, UseInfo::TruncatingFloat64(),
-                  MachineRepresentation::kBit);
-        if (lower()) {
-          // NumberIsHoleNaN(x) => Word32Equal(Float64ExtractLowWord32(x),
-          //                                   #HoleNaNLower32)
-          node->ReplaceInput(0,
-                             jsgraph_->graph()->NewNode(
-                                 lowering->machine()->Float64ExtractLowWord32(),
-                                 node->InputAt(0)));
-          node->AppendInput(jsgraph_->zone(),
-                            jsgraph_->Int32Constant(kHoleNanLower32));
-          NodeProperties::ChangeOp(node, jsgraph_->machine()->Word32Equal());
-        }
-        break;
+        return;
       }
       case IrOpcode::kReferenceEqual: {
         VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
         if (lower()) {
           NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
         }
-        break;
+        return;
       }
       case IrOpcode::kStringEqual: {
         VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
         if (lower()) {
           // StringEqual(x, y) => Call(StringEqualStub, x, y, no-context)
           Operator::Properties properties =
-              Operator::kCommutative | Operator::kNoThrow;
+              Operator::kCommutative | Operator::kEliminatable;
           Callable callable = CodeFactory::StringEqual(jsgraph_->isolate());
           CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
           CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -1125,16 +1615,15 @@
                             jsgraph_->HeapConstant(callable.code()));
           node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
           node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
-          node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
           NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
         }
-        break;
+        return;
       }
       case IrOpcode::kStringLessThan: {
         VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
         if (lower()) {
           // StringLessThan(x, y) => Call(StringLessThanStub, x, y, no-context)
-          Operator::Properties properties = Operator::kNoThrow;
+          Operator::Properties properties = Operator::kEliminatable;
           Callable callable = CodeFactory::StringLessThan(jsgraph_->isolate());
           CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
           CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -1144,17 +1633,16 @@
                             jsgraph_->HeapConstant(callable.code()));
           node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
           node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
-          node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
           NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
         }
-        break;
+        return;
       }
       case IrOpcode::kStringLessThanOrEqual: {
         VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
         if (lower()) {
           // StringLessThanOrEqual(x, y)
           //   => Call(StringLessThanOrEqualStub, x, y, no-context)
-          Operator::Properties properties = Operator::kNoThrow;
+          Operator::Properties properties = Operator::kEliminatable;
           Callable callable =
               CodeFactory::StringLessThanOrEqual(jsgraph_->isolate());
           CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
@@ -1165,16 +1653,20 @@
                             jsgraph_->HeapConstant(callable.code()));
           node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
           node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
-          node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
           NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
         }
-        break;
+        return;
+      }
+      case IrOpcode::kStringFromCharCode: {
+        VisitUnop(node, UseInfo::TruncatingWord32(),
+                  MachineRepresentation::kTagged);
+        return;
       }
       case IrOpcode::kStringToNumber: {
         VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
         if (lower()) {
-          // StringToNumber(x) => Call(StringToNumberStub, x, no-context)
-          Operator::Properties properties = Operator::kNoThrow;
+          // StringToNumber(x) => Call(StringToNumber, x, no-context)
+          Operator::Properties properties = Operator::kEliminatable;
           Callable callable = CodeFactory::StringToNumber(jsgraph_->isolate());
           CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
           CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -1184,23 +1676,54 @@
                             jsgraph_->HeapConstant(callable.code()));
           node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
           node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
-          node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
           NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
         }
-        break;
+        return;
       }
+
+      case IrOpcode::kCheckBounds: {
+        VisitBinop(node, UseInfo::CheckedSigned32AsWord32(),
+                   UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
+        return;
+      }
+      case IrOpcode::kCheckTaggedPointer: {
+        VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+        if (lower()) {
+          if (InputIs(node, Type::TaggedPointer())) {
+            DeferReplacement(node, node->InputAt(0));
+          }
+        }
+        return;
+      }
+      case IrOpcode::kCheckTaggedSigned: {
+        if (SmiValuesAre32Bits() && truncation.TruncatesToWord32()) {
+          // TODO(jarin,bmeurer): Add CheckedSignedSmallAsWord32?
+          VisitUnop(node, UseInfo::CheckedSigned32AsWord32(),
+                    MachineRepresentation::kWord32);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else {
+          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+          if (lower()) {
+            if (InputIs(node, Type::TaggedSigned())) {
+              DeferReplacement(node, node->InputAt(0));
+            }
+          }
+        }
+        return;
+      }
+
       case IrOpcode::kAllocate: {
         ProcessInput(node, 0, UseInfo::TruncatingWord32());
         ProcessRemainingInputs(node, 1);
         SetOutput(node, MachineRepresentation::kTagged);
-        break;
+        return;
       }
       case IrOpcode::kLoadField: {
         FieldAccess access = FieldAccessOf(node->op());
         ProcessInput(node, 0, UseInfoForBasePointer(access));
         ProcessRemainingInputs(node, 1);
         SetOutput(node, access.machine_type.representation());
-        break;
+        return;
       }
       case IrOpcode::kStoreField: {
         FieldAccess access = FieldAccessOf(node->op());
@@ -1219,7 +1742,7 @@
                 node, jsgraph_->simplified()->StoreField(access));
           }
         }
-        break;
+        return;
       }
       case IrOpcode::kLoadBuffer: {
         BufferAccess access = BufferAccessOf(node->op());
@@ -1252,7 +1775,7 @@
         }
         SetOutput(node, output);
         if (lower()) lowering->DoLoadBuffer(node, output, changer_);
-        break;
+        return;
       }
       case IrOpcode::kStoreBuffer: {
         BufferAccess access = BufferAccessOf(node->op());
@@ -1265,7 +1788,7 @@
         ProcessRemainingInputs(node, 4);
         SetOutput(node, MachineRepresentation::kNone);
         if (lower()) lowering->DoStoreBuffer(node);
-        break;
+        return;
       }
       case IrOpcode::kLoadElement: {
         ElementAccess access = ElementAccessOf(node->op());
@@ -1273,7 +1796,7 @@
         ProcessInput(node, 1, UseInfo::TruncatingWord32());    // index
         ProcessRemainingInputs(node, 2);
         SetOutput(node, access.machine_type.representation());
-        break;
+        return;
       }
       case IrOpcode::kStoreElement: {
         ElementAccess access = ElementAccessOf(node->op());
@@ -1294,8 +1817,41 @@
                 node, jsgraph_->simplified()->StoreElement(access));
           }
         }
-        break;
+        return;
       }
+      case IrOpcode::kPlainPrimitiveToNumber:
+        if (truncation.TruncatesToWord32()) {
+          // TODO(jarin): Extend this to Number \/ Oddball
+          if (InputIs(node, Type::NumberOrUndefined())) {
+            VisitUnop(node, UseInfo::TruncatingWord32(),
+                      MachineRepresentation::kWord32);
+            if (lower()) DeferReplacement(node, node->InputAt(0));
+          } else {
+            VisitUnop(node, UseInfo::AnyTagged(),
+                      MachineRepresentation::kWord32);
+            if (lower()) {
+              NodeProperties::ChangeOp(node,
+                                       simplified()->PlainPrimitiveToWord32());
+            }
+          }
+        } else if (truncation.TruncatesToFloat64()) {
+          // TODO(jarin): Extend this to Number \/ Oddball
+          if (InputIs(node, Type::NumberOrUndefined())) {
+            VisitUnop(node, UseInfo::TruncatingFloat64(),
+                      MachineRepresentation::kFloat64);
+            if (lower()) DeferReplacement(node, node->InputAt(0));
+          } else {
+            VisitUnop(node, UseInfo::AnyTagged(),
+                      MachineRepresentation::kFloat64);
+            if (lower()) {
+              NodeProperties::ChangeOp(node,
+                                       simplified()->PlainPrimitiveToFloat64());
+            }
+          }
+        } else {
+          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+        }
+        return;
       case IrOpcode::kObjectIsCallable:
       case IrOpcode::kObjectIsNumber:
       case IrOpcode::kObjectIsReceiver:
@@ -1304,7 +1860,33 @@
       case IrOpcode::kObjectIsUndetectable: {
         ProcessInput(node, 0, UseInfo::AnyTagged());
         SetOutput(node, MachineRepresentation::kBit);
-        break;
+        return;
+      }
+      case IrOpcode::kCheckFloat64Hole: {
+        CheckFloat64HoleMode mode = CheckFloat64HoleModeOf(node->op());
+        ProcessInput(node, 0, UseInfo::TruncatingFloat64());
+        ProcessRemainingInputs(node, 1);
+        SetOutput(node, MachineRepresentation::kFloat64);
+        if (truncation.TruncatesToFloat64() &&
+            mode == CheckFloat64HoleMode::kAllowReturnHole) {
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        }
+        return;
+      }
+      case IrOpcode::kCheckTaggedHole: {
+        CheckTaggedHoleMode mode = CheckTaggedHoleModeOf(node->op());
+        if (truncation.TruncatesToWord32() &&
+            mode == CheckTaggedHoleMode::kConvertHoleToUndefined) {
+          ProcessInput(node, 0, UseInfo::CheckedSigned32AsWord32());
+          ProcessRemainingInputs(node, 1);
+          SetOutput(node, MachineRepresentation::kWord32);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else {
+          ProcessInput(node, 0, UseInfo::AnyTagged());
+          ProcessRemainingInputs(node, 1);
+          SetOutput(node, MachineRepresentation::kTagged);
+        }
+        return;
       }
 
       //------------------------------------------------------------------
@@ -1317,8 +1899,7 @@
         ProcessInput(node, 0, UseInfo::AnyTagged());   // tagged pointer
         ProcessInput(node, 1, UseInfo::PointerInt());  // index
         ProcessRemainingInputs(node, 2);
-        SetOutput(node, rep.representation());
-        break;
+        return SetOutput(node, rep.representation());
       }
       case IrOpcode::kStore: {
         // TODO(jarin) Eventually, we should get rid of all machine stores
@@ -1329,8 +1910,7 @@
         ProcessInput(node, 2,
                      TruncatingUseInfoFromRepresentation(rep.representation()));
         ProcessRemainingInputs(node, 3);
-        SetOutput(node, MachineRepresentation::kNone);
-        break;
+        return SetOutput(node, MachineRepresentation::kNone);
       }
       case IrOpcode::kWord32Shr:
         // We output unsigned int32 for shift right because JavaScript.
@@ -1415,10 +1995,6 @@
         return VisitUnop(node, UseInfo::TruncatingFloat64(),
                          MachineRepresentation::kWord32);
 
-      case IrOpcode::kChangeFloat32ToFloat64:
-        UNREACHABLE();
-        return VisitUnop(node, UseInfo::TruncatingFloat32(),
-                         MachineRepresentation::kFloat64);
       case IrOpcode::kChangeInt32ToFloat64:
         return VisitUnop(node, UseInfo::TruncatingWord32(),
                          MachineRepresentation::kFloat64);
@@ -1440,6 +2016,9 @@
       case IrOpcode::kFloat64RoundUp:
         return VisitUnop(node, UseInfo::TruncatingFloat64(),
                          MachineRepresentation::kFloat64);
+      case IrOpcode::kFloat64SilenceNaN:
+        return VisitUnop(node, UseInfo::TruncatingFloat64(),
+                         MachineRepresentation::kFloat64);
       case IrOpcode::kFloat64Equal:
       case IrOpcode::kFloat64LessThan:
       case IrOpcode::kFloat64LessThanOrEqual:
@@ -1453,13 +2032,17 @@
         return VisitBinop(node, UseInfo::TruncatingFloat64(),
                           UseInfo::TruncatingWord32(),
                           MachineRepresentation::kFloat64);
+      case IrOpcode::kNumberSilenceNaN:
+        VisitUnop(node, UseInfo::TruncatingFloat64(),
+                  MachineRepresentation::kFloat64);
+        if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+        return;
       case IrOpcode::kLoadStackPointer:
       case IrOpcode::kLoadFramePointer:
       case IrOpcode::kLoadParentFramePointer:
         return VisitLeaf(node, MachineType::PointerRepresentation());
       case IrOpcode::kStateValues:
-        VisitStateValues(node);
-        break;
+        return VisitStateValues(node);
 
       // The following opcodes are not produced before representation
       // inference runs, so we do not have any real test coverage.
@@ -1467,14 +2050,24 @@
       case IrOpcode::kChangeFloat64ToInt32:
       case IrOpcode::kChangeFloat64ToUint32:
       case IrOpcode::kTruncateInt64ToInt32:
+      case IrOpcode::kChangeFloat32ToFloat64:
+      case IrOpcode::kCheckedInt32Add:
+      case IrOpcode::kCheckedInt32Sub:
+      case IrOpcode::kCheckedUint32ToInt32:
+      case IrOpcode::kCheckedFloat64ToInt32:
+      case IrOpcode::kCheckedTaggedToInt32:
+      case IrOpcode::kCheckedTaggedToFloat64:
+      case IrOpcode::kPlainPrimitiveToWord32:
+      case IrOpcode::kPlainPrimitiveToFloat64:
         FATAL("Representation inference: unsupported opcodes.");
+        break;
 
       default:
         VisitInputs(node);
         // Assume the output is tagged.
-        SetOutput(node, MachineRepresentation::kTagged);
-        break;
+        return SetOutput(node, MachineRepresentation::kTagged);
     }
+    UNREACHABLE();
   }
 
   void DeferReplacement(Node* node, Node* replacement) {
@@ -1482,8 +2075,20 @@
           node->op()->mnemonic(), replacement->id(),
           replacement->op()->mnemonic());
 
+    // Disconnect the node from effect and control chains, if necessary.
+    if (node->op()->EffectInputCount() > 0) {
+      DCHECK_LT(0, node->op()->ControlInputCount());
+      // Disconnect the node from effect and control chains.
+      Node* control = NodeProperties::GetControlInput(node);
+      Node* effect = NodeProperties::GetEffectInput(node);
+      ReplaceEffectControlUses(node, effect, control);
+    } else {
+      DCHECK_EQ(0, node->op()->ControlInputCount());
+    }
+
     if (replacement->id() < count_ &&
-        GetUpperBound(node)->Is(GetUpperBound(replacement))) {
+        GetUpperBound(node)->Is(GetUpperBound(replacement)) &&
+        TypeOf(node)->Is(TypeOf(replacement))) {
       // Replace with a previously existing node eagerly only if the type is the
       // same.
       node->ReplaceUses(replacement);
@@ -1515,19 +2120,20 @@
   void PrintTruncation(Truncation truncation) {
     if (FLAG_trace_representation) {
       OFStream os(stdout);
-      os << truncation.description();
+      os << truncation.description() << std::endl;
     }
   }
 
   void PrintUseInfo(UseInfo info) {
     if (FLAG_trace_representation) {
       OFStream os(stdout);
-      os << info.preferred() << ":" << info.truncation().description();
+      os << info.representation() << ":" << info.truncation().description();
     }
   }
 
  private:
   JSGraph* jsgraph_;
+  Zone* zone_;                      // Temporary zone.
   size_t const count_;              // number of nodes in the graph
   ZoneVector<NodeInfo> info_;       // node id -> usage information
 #ifdef DEBUG
@@ -1539,6 +2145,12 @@
   Phase phase_;                     // current phase of algorithm
   RepresentationChanger* changer_;  // for inserting representation changes
   ZoneQueue<Node*> queue_;          // queue for traversing the graph
+
+  struct NodeState {
+    Node* node;
+    int input_index;
+  };
+  ZoneStack<NodeState> typing_stack_;  // stack for graph typing.
   // TODO(danno): RepresentationSelector shouldn't know anything about the
   // source positions table, but must for now since there currently is no other
   // way to pass down source position information to nodes created during
@@ -1546,23 +2158,26 @@
   // position information via the SourcePositionWrapper like all other reducers.
   SourcePositionTable* source_positions_;
   TypeCache const& type_cache_;
+  OperationTyper op_typer_;  // helper for the feedback typer
 
   NodeInfo* GetInfo(Node* node) {
     DCHECK(node->id() >= 0);
     DCHECK(node->id() < count_);
     return &info_[node->id()];
   }
+  Zone* zone() { return zone_; }
+  Zone* graph_zone() { return jsgraph_->zone(); }
 };
 
-
 SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
-                                       SourcePositionTable* source_positions)
+                                       SourcePositionTable* source_positions,
+                                       Flags flags)
     : jsgraph_(jsgraph),
       zone_(zone),
       type_cache_(TypeCache::Get()),
+      flags_(flags),
       source_positions_(source_positions) {}
 
-
 void SimplifiedLowering::LowerAllNodes() {
   RepresentationChanger changer(jsgraph(), jsgraph()->isolate());
   RepresentationSelector selector(jsgraph(), zone_, &changer,
@@ -1758,8 +2373,8 @@
     Type* element_type =
         Type::Intersect(NodeProperties::GetType(node), Type::Number(), zone());
     Node* vtrue = changer->GetRepresentationFor(
-        etrue, access_type.representation(), element_type, output_rep,
-        Truncation::None());
+        etrue, access_type.representation(), element_type, node,
+        UseInfo(output_rep, Truncation::None()));
 
     Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
     Node* efalse = effect;
@@ -2188,6 +2803,17 @@
                           vtrue0, vfalse0, merge0);
 }
 
+Node* SimplifiedLowering::Int32Abs(Node* const node) {
+  Node* const zero = jsgraph()->Int32Constant(0);
+  Node* const input = node->InputAt(0);
+
+  // if 0 < input then input else 0 - input
+  return graph()->NewNode(
+      common()->Select(MachineRepresentation::kWord32, BranchHint::kTrue),
+      graph()->NewNode(machine()->Int32LessThan(), zero, input), input,
+      graph()->NewNode(machine()->Int32Sub(), zero, input));
+}
+
 Node* SimplifiedLowering::Int32Div(Node* const node) {
   Int32BinopMatcher m(node);
   Node* const zero = jsgraph()->Int32Constant(0);
diff --git a/src/compiler/simplified-lowering.h b/src/compiler/simplified-lowering.h
index baffe20..75fd9c2 100644
--- a/src/compiler/simplified-lowering.h
+++ b/src/compiler/simplified-lowering.h
@@ -5,6 +5,7 @@
 #ifndef V8_COMPILER_SIMPLIFIED_LOWERING_H_
 #define V8_COMPILER_SIMPLIFIED_LOWERING_H_
 
+#include "src/base/flags.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node.h"
@@ -26,8 +27,11 @@
 
 class SimplifiedLowering final {
  public:
+  enum Flag { kNoFlag = 0u, kTypeFeedbackEnabled = 1u << 0 };
+  typedef base::Flags<Flag> Flags;
   SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
-                     SourcePositionTable* source_positions);
+                     SourcePositionTable* source_positions,
+                     Flags flags = kNoFlag);
   ~SimplifiedLowering() {}
 
   void LowerAllNodes();
@@ -43,12 +47,15 @@
   void DoStoreBuffer(Node* node);
   void DoShift(Node* node, Operator const* op, Type* rhs_type);
 
+  Flags flags() const { return flags_; }
+
  private:
   JSGraph* const jsgraph_;
   Zone* const zone_;
   TypeCache const& type_cache_;
   SetOncePointer<Node> to_number_code_;
   SetOncePointer<Operator const> to_number_operator_;
+  Flags flags_;
 
   // TODO(danno): SimplifiedLowering shouldn't know anything about the source
   // positions table, but must for now since there currently is no other way to
@@ -61,6 +68,7 @@
   Node* Float64Floor(Node* const node);
   Node* Float64Round(Node* const node);
   Node* Float64Trunc(Node* const node);
+  Node* Int32Abs(Node* const node);
   Node* Int32Div(Node* const node);
   Node* Int32Mod(Node* const node);
   Node* Uint32Div(Node* const node);
diff --git a/src/compiler/simplified-operator-reducer.cc b/src/compiler/simplified-operator-reducer.cc
index 6fbf16e..5db9dfb 100644
--- a/src/compiler/simplified-operator-reducer.cc
+++ b/src/compiler/simplified-operator-reducer.cc
@@ -16,8 +16,27 @@
 namespace internal {
 namespace compiler {
 
-SimplifiedOperatorReducer::SimplifiedOperatorReducer(JSGraph* jsgraph)
-    : jsgraph_(jsgraph), type_cache_(TypeCache::Get()) {}
+namespace {
+
+Decision DecideObjectIsSmi(Node* const input) {
+  NumberMatcher m(input);
+  if (m.HasValue()) {
+    return IsSmiDouble(m.Value()) ? Decision::kTrue : Decision::kFalse;
+  }
+  if (m.IsAllocate()) return Decision::kFalse;
+  if (m.IsChangeBitToTagged()) return Decision::kFalse;
+  if (m.IsChangeInt31ToTaggedSigned()) return Decision::kTrue;
+  if (m.IsHeapConstant()) return Decision::kFalse;
+  return Decision::kUnknown;
+}
+
+}  // namespace
+
+SimplifiedOperatorReducer::SimplifiedOperatorReducer(Editor* editor,
+                                                     JSGraph* jsgraph)
+    : AdvancedReducer(editor),
+      jsgraph_(jsgraph),
+      type_cache_(TypeCache::Get()) {}
 
 SimplifiedOperatorReducer::~SimplifiedOperatorReducer() {}
 
@@ -60,7 +79,8 @@
       }
       break;
     }
-    case IrOpcode::kChangeTaggedToFloat64: {
+    case IrOpcode::kChangeTaggedToFloat64:
+    case IrOpcode::kTruncateTaggedToFloat64: {
       NumberMatcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceFloat64(m.Value());
       if (m.IsChangeFloat64ToTagged()) return Replace(m.node()->InputAt(0));
@@ -109,6 +129,39 @@
       }
       break;
     }
+    case IrOpcode::kCheckTaggedPointer: {
+      Node* const input = node->InputAt(0);
+      if (DecideObjectIsSmi(input) == Decision::kFalse) {
+        ReplaceWithValue(node, input);
+        return Replace(input);
+      }
+      break;
+    }
+    case IrOpcode::kCheckTaggedSigned: {
+      Node* const input = node->InputAt(0);
+      if (DecideObjectIsSmi(input) == Decision::kTrue) {
+        ReplaceWithValue(node, input);
+        return Replace(input);
+      }
+      break;
+    }
+    case IrOpcode::kObjectIsSmi: {
+      Node* const input = node->InputAt(0);
+      switch (DecideObjectIsSmi(input)) {
+        case Decision::kTrue:
+          return ReplaceBoolean(true);
+        case Decision::kFalse:
+          return ReplaceBoolean(false);
+        case Decision::kUnknown:
+          break;
+      }
+      break;
+    }
+    case IrOpcode::kNumberAbs: {
+      NumberMatcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceNumber(std::fabs(m.Value()));
+      break;
+    }
     case IrOpcode::kNumberCeil:
     case IrOpcode::kNumberFloor:
     case IrOpcode::kNumberRound:
@@ -164,6 +217,9 @@
   return Changed(node);
 }
 
+Reduction SimplifiedOperatorReducer::ReplaceBoolean(bool value) {
+  return Replace(jsgraph()->BooleanConstant(value));
+}
 
 Reduction SimplifiedOperatorReducer::ReplaceFloat64(double value) {
   return Replace(jsgraph()->Float64Constant(value));
diff --git a/src/compiler/simplified-operator-reducer.h b/src/compiler/simplified-operator-reducer.h
index 70750a8..6ee903b 100644
--- a/src/compiler/simplified-operator-reducer.h
+++ b/src/compiler/simplified-operator-reducer.h
@@ -20,10 +20,9 @@
 class MachineOperatorBuilder;
 class SimplifiedOperatorBuilder;
 
-
-class SimplifiedOperatorReducer final : public Reducer {
+class SimplifiedOperatorReducer final : public AdvancedReducer {
  public:
-  explicit SimplifiedOperatorReducer(JSGraph* jsgraph);
+  SimplifiedOperatorReducer(Editor* editor, JSGraph* jsgraph);
   ~SimplifiedOperatorReducer() final;
 
   Reduction Reduce(Node* node) final;
@@ -33,6 +32,7 @@
   Reduction ReduceTypeGuard(Node* node);
 
   Reduction Change(Node* node, const Operator* op, Node* a);
+  Reduction ReplaceBoolean(bool value);
   Reduction ReplaceFloat64(double value);
   Reduction ReplaceInt32(int32_t value);
   Reduction ReplaceUint32(uint32_t value) {
diff --git a/src/compiler/simplified-operator.cc b/src/compiler/simplified-operator.cc
index 0350403..0f32b0c 100644
--- a/src/compiler/simplified-operator.cc
+++ b/src/compiler/simplified-operator.cc
@@ -172,11 +172,67 @@
   return OpParameter<ElementAccess>(op);
 }
 
+size_t hash_value(CheckFloat64HoleMode mode) {
+  return static_cast<size_t>(mode);
+}
+
+std::ostream& operator<<(std::ostream& os, CheckFloat64HoleMode mode) {
+  switch (mode) {
+    case CheckFloat64HoleMode::kAllowReturnHole:
+      return os << "allow-return-hole";
+    case CheckFloat64HoleMode::kNeverReturnHole:
+      return os << "never-return-hole";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator* op) {
+  DCHECK_EQ(IrOpcode::kCheckFloat64Hole, op->opcode());
+  return OpParameter<CheckFloat64HoleMode>(op);
+}
+
+size_t hash_value(CheckTaggedHoleMode mode) {
+  return static_cast<size_t>(mode);
+}
+
+std::ostream& operator<<(std::ostream& os, CheckTaggedHoleMode mode) {
+  switch (mode) {
+    case CheckTaggedHoleMode::kConvertHoleToUndefined:
+      return os << "convert-hole-to-undefined";
+    case CheckTaggedHoleMode::kNeverReturnHole:
+      return os << "never-return-hole";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+CheckTaggedHoleMode CheckTaggedHoleModeOf(const Operator* op) {
+  DCHECK_EQ(IrOpcode::kCheckTaggedHole, op->opcode());
+  return OpParameter<CheckTaggedHoleMode>(op);
+}
+
 Type* TypeOf(const Operator* op) {
   DCHECK_EQ(IrOpcode::kTypeGuard, op->opcode());
   return OpParameter<Type*>(op);
 }
 
+BinaryOperationHints::Hint BinaryOperationHintOf(const Operator* op) {
+  DCHECK(op->opcode() == IrOpcode::kSpeculativeNumberAdd ||
+         op->opcode() == IrOpcode::kSpeculativeNumberSubtract ||
+         op->opcode() == IrOpcode::kSpeculativeNumberMultiply ||
+         op->opcode() == IrOpcode::kSpeculativeNumberDivide ||
+         op->opcode() == IrOpcode::kSpeculativeNumberModulus);
+  return OpParameter<BinaryOperationHints::Hint>(op);
+}
+
+CompareOperationHints::Hint CompareOperationHintOf(const Operator* op) {
+  DCHECK(op->opcode() == IrOpcode::kSpeculativeNumberEqual ||
+         op->opcode() == IrOpcode::kSpeculativeNumberLessThan ||
+         op->opcode() == IrOpcode::kSpeculativeNumberLessThanOrEqual);
+  return OpParameter<CompareOperationHints::Hint>(op);
+}
+
 #define PURE_OP_LIST(V)                                    \
   V(BooleanNot, Operator::kNoProperties, 1)                \
   V(BooleanToNumber, Operator::kNoProperties, 1)           \
@@ -195,15 +251,35 @@
   V(NumberShiftRight, Operator::kNoProperties, 2)          \
   V(NumberShiftRightLogical, Operator::kNoProperties, 2)   \
   V(NumberImul, Operator::kCommutative, 2)                 \
+  V(NumberAbs, Operator::kNoProperties, 1)                 \
   V(NumberClz32, Operator::kNoProperties, 1)               \
   V(NumberCeil, Operator::kNoProperties, 1)                \
   V(NumberFloor, Operator::kNoProperties, 1)               \
+  V(NumberFround, Operator::kNoProperties, 1)              \
+  V(NumberAtan, Operator::kNoProperties, 1)                \
+  V(NumberAtan2, Operator::kNoProperties, 2)               \
+  V(NumberAtanh, Operator::kNoProperties, 1)               \
+  V(NumberCbrt, Operator::kNoProperties, 1)                \
+  V(NumberCos, Operator::kNoProperties, 1)                 \
+  V(NumberExp, Operator::kNoProperties, 1)                 \
+  V(NumberExpm1, Operator::kNoProperties, 1)               \
+  V(NumberLog, Operator::kNoProperties, 1)                 \
+  V(NumberLog1p, Operator::kNoProperties, 1)               \
+  V(NumberLog10, Operator::kNoProperties, 1)               \
+  V(NumberLog2, Operator::kNoProperties, 1)                \
   V(NumberRound, Operator::kNoProperties, 1)               \
+  V(NumberSin, Operator::kNoProperties, 1)                 \
+  V(NumberSqrt, Operator::kNoProperties, 1)                \
+  V(NumberTan, Operator::kNoProperties, 1)                 \
   V(NumberTrunc, Operator::kNoProperties, 1)               \
   V(NumberToInt32, Operator::kNoProperties, 1)             \
   V(NumberToUint32, Operator::kNoProperties, 1)            \
-  V(NumberIsHoleNaN, Operator::kNoProperties, 1)           \
+  V(NumberSilenceNaN, Operator::kNoProperties, 1)          \
+  V(StringFromCharCode, Operator::kNoProperties, 1)        \
   V(StringToNumber, Operator::kNoProperties, 1)            \
+  V(PlainPrimitiveToNumber, Operator::kNoProperties, 1)    \
+  V(PlainPrimitiveToWord32, Operator::kNoProperties, 1)    \
+  V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1)   \
   V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1) \
   V(ChangeTaggedToInt32, Operator::kNoProperties, 1)       \
   V(ChangeTaggedToUint32, Operator::kNoProperties, 1)      \
@@ -215,6 +291,7 @@
   V(ChangeTaggedToBit, Operator::kNoProperties, 1)         \
   V(ChangeBitToTagged, Operator::kNoProperties, 1)         \
   V(TruncateTaggedToWord32, Operator::kNoProperties, 1)    \
+  V(TruncateTaggedToFloat64, Operator::kNoProperties, 1)   \
   V(ObjectIsCallable, Operator::kNoProperties, 1)          \
   V(ObjectIsNumber, Operator::kNoProperties, 1)            \
   V(ObjectIsReceiver, Operator::kNoProperties, 1)          \
@@ -225,6 +302,23 @@
   V(StringLessThan, Operator::kNoProperties, 2)            \
   V(StringLessThanOrEqual, Operator::kNoProperties, 2)
 
+#define SPECULATIVE_BINOP_LIST(V) \
+  V(SpeculativeNumberAdd)         \
+  V(SpeculativeNumberSubtract)    \
+  V(SpeculativeNumberDivide)      \
+  V(SpeculativeNumberMultiply)    \
+  V(SpeculativeNumberModulus)
+
+#define CHECKED_OP_LIST(V)    \
+  V(CheckTaggedPointer, 1)    \
+  V(CheckTaggedSigned, 1)     \
+  V(CheckedInt32Add, 2)       \
+  V(CheckedInt32Sub, 2)       \
+  V(CheckedUint32ToInt32, 1)  \
+  V(CheckedFloat64ToInt32, 1) \
+  V(CheckedTaggedToInt32, 1)  \
+  V(CheckedTaggedToFloat64, 1)
+
 struct SimplifiedOperatorGlobalCache final {
 #define PURE(Name, properties, input_count)                                \
   struct Name##Operator final : public Operator {                          \
@@ -236,11 +330,51 @@
   PURE_OP_LIST(PURE)
 #undef PURE
 
+#define CHECKED(Name, value_input_count)                            \
+  struct Name##Operator final : public Operator {                   \
+    Name##Operator()                                                \
+        : Operator(IrOpcode::k##Name,                               \
+                   Operator::kFoldable | Operator::kNoThrow, #Name, \
+                   value_input_count, 1, 1, 1, 1, 0) {}             \
+  };                                                                \
+  Name##Operator k##Name;
+  CHECKED_OP_LIST(CHECKED)
+#undef CHECKED
+
+  template <CheckFloat64HoleMode kMode>
+  struct CheckFloat64HoleNaNOperator final
+      : public Operator1<CheckFloat64HoleMode> {
+    CheckFloat64HoleNaNOperator()
+        : Operator1<CheckFloat64HoleMode>(
+              IrOpcode::kCheckFloat64Hole,
+              Operator::kFoldable | Operator::kNoThrow, "CheckFloat64Hole", 1,
+              1, 1, 1, 1, 0, kMode) {}
+  };
+  CheckFloat64HoleNaNOperator<CheckFloat64HoleMode::kAllowReturnHole>
+      kCheckFloat64HoleAllowReturnHoleOperator;
+  CheckFloat64HoleNaNOperator<CheckFloat64HoleMode::kNeverReturnHole>
+      kCheckFloat64HoleNeverReturnHoleOperator;
+
+  template <CheckTaggedHoleMode kMode>
+  struct CheckTaggedHoleOperator final : public Operator1<CheckTaggedHoleMode> {
+    CheckTaggedHoleOperator()
+        : Operator1<CheckTaggedHoleMode>(
+              IrOpcode::kCheckTaggedHole,
+              Operator::kFoldable | Operator::kNoThrow, "CheckTaggedHole", 1, 1,
+              1, 1, 1, 0, kMode) {}
+  };
+  CheckTaggedHoleOperator<CheckTaggedHoleMode::kConvertHoleToUndefined>
+      kCheckTaggedHoleConvertHoleToUndefinedOperator;
+  CheckTaggedHoleOperator<CheckTaggedHoleMode::kNeverReturnHole>
+      kCheckTaggedHoleNeverReturnHoleOperator;
+
   template <PretenureFlag kPretenure>
   struct AllocateOperator final : public Operator1<PretenureFlag> {
     AllocateOperator()
-        : Operator1<PretenureFlag>(IrOpcode::kAllocate, Operator::kNoThrow,
-                                   "Allocate", 1, 1, 1, 1, 1, 0, kPretenure) {}
+        : Operator1<PretenureFlag>(
+              IrOpcode::kAllocate,
+              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,
+              "Allocate", 1, 1, 1, 1, 1, 0, kPretenure) {}
   };
   AllocateOperator<NOT_TENURED> kAllocateNotTenuredOperator;
   AllocateOperator<TENURED> kAllocateTenuredOperator;
@@ -248,17 +382,19 @@
 #define BUFFER_ACCESS(Type, type, TYPE, ctype, size)                          \
   struct LoadBuffer##Type##Operator final : public Operator1<BufferAccess> {  \
     LoadBuffer##Type##Operator()                                              \
-        : Operator1<BufferAccess>(IrOpcode::kLoadBuffer,                      \
-                                  Operator::kNoThrow | Operator::kNoWrite,    \
-                                  "LoadBuffer", 3, 1, 1, 1, 1, 0,             \
-                                  BufferAccess(kExternal##Type##Array)) {}    \
+        : Operator1<BufferAccess>(                                            \
+              IrOpcode::kLoadBuffer,                                          \
+              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,   \
+              "LoadBuffer", 3, 1, 1, 1, 1, 0,                                 \
+              BufferAccess(kExternal##Type##Array)) {}                        \
   };                                                                          \
   struct StoreBuffer##Type##Operator final : public Operator1<BufferAccess> { \
     StoreBuffer##Type##Operator()                                             \
-        : Operator1<BufferAccess>(IrOpcode::kStoreBuffer,                     \
-                                  Operator::kNoRead | Operator::kNoThrow,     \
-                                  "StoreBuffer", 4, 1, 1, 0, 1, 0,            \
-                                  BufferAccess(kExternal##Type##Array)) {}    \
+        : Operator1<BufferAccess>(                                            \
+              IrOpcode::kStoreBuffer,                                         \
+              Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,    \
+              "StoreBuffer", 4, 1, 1, 0, 1, 0,                                \
+              BufferAccess(kExternal##Type##Array)) {}                        \
   };                                                                          \
   LoadBuffer##Type##Operator kLoadBuffer##Type;                               \
   StoreBuffer##Type##Operator kStoreBuffer##Type;
@@ -274,12 +410,39 @@
 SimplifiedOperatorBuilder::SimplifiedOperatorBuilder(Zone* zone)
     : cache_(kCache.Get()), zone_(zone) {}
 
-
 #define GET_FROM_CACHE(Name, properties, input_count) \
   const Operator* SimplifiedOperatorBuilder::Name() { return &cache_.k##Name; }
 PURE_OP_LIST(GET_FROM_CACHE)
 #undef GET_FROM_CACHE
 
+#define GET_FROM_CACHE(Name, value_input_count) \
+  const Operator* SimplifiedOperatorBuilder::Name() { return &cache_.k##Name; }
+CHECKED_OP_LIST(GET_FROM_CACHE)
+#undef GET_FROM_CACHE
+
+const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
+    CheckFloat64HoleMode mode) {
+  switch (mode) {
+    case CheckFloat64HoleMode::kAllowReturnHole:
+      return &cache_.kCheckFloat64HoleAllowReturnHoleOperator;
+    case CheckFloat64HoleMode::kNeverReturnHole:
+      return &cache_.kCheckFloat64HoleNeverReturnHoleOperator;
+  }
+  UNREACHABLE();
+  return nullptr;
+}
+
+const Operator* SimplifiedOperatorBuilder::CheckTaggedHole(
+    CheckTaggedHoleMode mode) {
+  switch (mode) {
+    case CheckTaggedHoleMode::kConvertHoleToUndefined:
+      return &cache_.kCheckTaggedHoleConvertHoleToUndefinedOperator;
+    case CheckTaggedHoleMode::kNeverReturnHole:
+      return &cache_.kCheckTaggedHoleNeverReturnHoleOperator;
+  }
+  UNREACHABLE();
+  return nullptr;
+}
 
 const Operator* SimplifiedOperatorBuilder::ReferenceEqual(Type* type) {
   return new (zone()) Operator(IrOpcode::kReferenceEqual,
@@ -287,6 +450,13 @@
                                "ReferenceEqual", 2, 0, 0, 1, 0, 0);
 }
 
+const Operator* SimplifiedOperatorBuilder::CheckBounds() {
+  // TODO(bmeurer): Cache this operator. Make it pure!
+  return new (zone())
+      Operator(IrOpcode::kCheckBounds, Operator::kFoldable | Operator::kNoThrow,
+               "CheckBounds", 2, 1, 1, 1, 1, 0);
+}
+
 const Operator* SimplifiedOperatorBuilder::TypeGuard(Type* type) {
   class TypeGuardOperator final : public Operator1<Type*> {
    public:
@@ -341,6 +511,39 @@
   return nullptr;
 }
 
+#define SPECULATIVE_BINOP_DEF(Name)                                            \
+  const Operator* SimplifiedOperatorBuilder::Name(                             \
+      BinaryOperationHints::Hint hint) {                                       \
+    return new (zone()) Operator1<BinaryOperationHints::Hint>(                 \
+        IrOpcode::k##Name, Operator::kFoldable | Operator::kNoThrow, #Name, 2, \
+        1, 1, 1, 1, 0, hint);                                                  \
+  }
+SPECULATIVE_BINOP_LIST(SPECULATIVE_BINOP_DEF)
+#undef SPECULATIVE_BINOP_DEF
+
+const Operator* SimplifiedOperatorBuilder::SpeculativeNumberEqual(
+    CompareOperationHints::Hint hint) {
+  return new (zone()) Operator1<CompareOperationHints::Hint>(
+      IrOpcode::kSpeculativeNumberEqual,
+      Operator::kFoldable | Operator::kNoThrow, "SpeculativeNumberEqual", 2, 1,
+      1, 1, 1, 0, hint);
+}
+
+const Operator* SimplifiedOperatorBuilder::SpeculativeNumberLessThan(
+    CompareOperationHints::Hint hint) {
+  return new (zone()) Operator1<CompareOperationHints::Hint>(
+      IrOpcode::kSpeculativeNumberLessThan,
+      Operator::kFoldable | Operator::kNoThrow, "SpeculativeNumberLessThan", 2,
+      1, 1, 1, 1, 0, hint);
+}
+
+const Operator* SimplifiedOperatorBuilder::SpeculativeNumberLessThanOrEqual(
+    CompareOperationHints::Hint hint) {
+  return new (zone()) Operator1<CompareOperationHints::Hint>(
+      IrOpcode::kSpeculativeNumberLessThanOrEqual,
+      Operator::kFoldable | Operator::kNoThrow,
+      "SpeculativeNumberLessThanOrEqual", 2, 1, 1, 1, 1, 0, hint);
+}
 
 #define ACCESS_OP_LIST(V)                                    \
   V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1, 1)     \
@@ -348,12 +551,12 @@
   V(LoadElement, ElementAccess, Operator::kNoWrite, 2, 1, 1) \
   V(StoreElement, ElementAccess, Operator::kNoRead, 3, 1, 0)
 
-
 #define ACCESS(Name, Type, properties, value_input_count, control_input_count, \
                output_count)                                                   \
   const Operator* SimplifiedOperatorBuilder::Name(const Type& access) {        \
     return new (zone())                                                        \
-        Operator1<Type>(IrOpcode::k##Name, Operator::kNoThrow | properties,    \
+        Operator1<Type>(IrOpcode::k##Name,                                     \
+                        Operator::kNoDeopt | Operator::kNoThrow | properties,  \
                         #Name, value_input_count, 1, control_input_count,      \
                         output_count, 1, 0, access);                           \
   }
diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h
index 20d8a39..ffdf33f 100644
--- a/src/compiler/simplified-operator.h
+++ b/src/compiler/simplified-operator.h
@@ -7,6 +7,7 @@
 
 #include <iosfwd>
 
+#include "src/compiler/type-hints.h"
 #include "src/handles.h"
 #include "src/machine-type.h"
 #include "src/objects.h"
@@ -102,8 +103,34 @@
 
 ElementAccess const& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
 
+enum class CheckFloat64HoleMode : uint8_t {
+  kNeverReturnHole,  // Never return the hole (deoptimize instead).
+  kAllowReturnHole   // Allow to return the hole (signaling NaN).
+};
+
+size_t hash_value(CheckFloat64HoleMode);
+
+std::ostream& operator<<(std::ostream&, CheckFloat64HoleMode);
+
+CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator*) WARN_UNUSED_RESULT;
+
+enum class CheckTaggedHoleMode : uint8_t {
+  kNeverReturnHole,        // Never return the hole (deoptimize instead).
+  kConvertHoleToUndefined  // Convert the hole to undefined.
+};
+
+size_t hash_value(CheckTaggedHoleMode);
+
+std::ostream& operator<<(std::ostream&, CheckTaggedHoleMode);
+
+CheckTaggedHoleMode CheckTaggedHoleModeOf(const Operator*) WARN_UNUSED_RESULT;
+
 Type* TypeOf(const Operator* op) WARN_UNUSED_RESULT;
 
+BinaryOperationHints::Hint BinaryOperationHintOf(const Operator* op);
+
+CompareOperationHints::Hint CompareOperationHintOf(const Operator* op);
+
 // Interface for building simplified operators, which represent the
 // medium-level operations of V8, including adding numbers, allocating objects,
 // indexing into objects and arrays, etc.
@@ -148,22 +175,55 @@
   const Operator* NumberShiftRight();
   const Operator* NumberShiftRightLogical();
   const Operator* NumberImul();
+  const Operator* NumberAbs();
   const Operator* NumberClz32();
   const Operator* NumberCeil();
   const Operator* NumberFloor();
+  const Operator* NumberFround();
+  const Operator* NumberAtan();
+  const Operator* NumberAtan2();
+  const Operator* NumberAtanh();
+  const Operator* NumberCbrt();
+  const Operator* NumberCos();
+  const Operator* NumberExp();
+  const Operator* NumberExpm1();
+  const Operator* NumberLog();
+  const Operator* NumberLog1p();
+  const Operator* NumberLog10();
+  const Operator* NumberLog2();
   const Operator* NumberRound();
+  const Operator* NumberSin();
+  const Operator* NumberSqrt();
+  const Operator* NumberTan();
   const Operator* NumberTrunc();
   const Operator* NumberToInt32();
   const Operator* NumberToUint32();
-  const Operator* NumberIsHoleNaN();
+
+  const Operator* NumberSilenceNaN();
+
+  const Operator* SpeculativeNumberAdd(BinaryOperationHints::Hint hint);
+  const Operator* SpeculativeNumberSubtract(BinaryOperationHints::Hint hint);
+  const Operator* SpeculativeNumberMultiply(BinaryOperationHints::Hint hint);
+  const Operator* SpeculativeNumberDivide(BinaryOperationHints::Hint hint);
+  const Operator* SpeculativeNumberModulus(BinaryOperationHints::Hint hint);
+
+  const Operator* SpeculativeNumberLessThan(CompareOperationHints::Hint hint);
+  const Operator* SpeculativeNumberLessThanOrEqual(
+      CompareOperationHints::Hint hint);
+  const Operator* SpeculativeNumberEqual(CompareOperationHints::Hint hint);
 
   const Operator* ReferenceEqual(Type* type);
 
   const Operator* StringEqual();
   const Operator* StringLessThan();
   const Operator* StringLessThanOrEqual();
+  const Operator* StringFromCharCode();
   const Operator* StringToNumber();
 
+  const Operator* PlainPrimitiveToNumber();
+  const Operator* PlainPrimitiveToWord32();
+  const Operator* PlainPrimitiveToFloat64();
+
   const Operator* ChangeTaggedSignedToInt32();
   const Operator* ChangeTaggedToInt32();
   const Operator* ChangeTaggedToUint32();
@@ -175,6 +235,21 @@
   const Operator* ChangeTaggedToBit();
   const Operator* ChangeBitToTagged();
   const Operator* TruncateTaggedToWord32();
+  const Operator* TruncateTaggedToFloat64();
+
+  const Operator* CheckBounds();
+  const Operator* CheckTaggedPointer();
+  const Operator* CheckTaggedSigned();
+
+  const Operator* CheckedInt32Add();
+  const Operator* CheckedInt32Sub();
+  const Operator* CheckedUint32ToInt32();
+  const Operator* CheckedFloat64ToInt32();
+  const Operator* CheckedTaggedToInt32();
+  const Operator* CheckedTaggedToFloat64();
+
+  const Operator* CheckFloat64Hole(CheckFloat64HoleMode);
+  const Operator* CheckTaggedHole(CheckTaggedHoleMode);
 
   const Operator* ObjectIsCallable();
   const Operator* ObjectIsNumber();
diff --git a/src/compiler/store-store-elimination.cc b/src/compiler/store-store-elimination.cc
new file mode 100644
index 0000000..a469b20
--- /dev/null
+++ b/src/compiler/store-store-elimination.cc
@@ -0,0 +1,264 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/store-store-elimination.h"
+
+#include "src/compiler/all-nodes.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE(fmt, ...)                                              \
+  do {                                                               \
+    if (FLAG_trace_store_elimination) {                              \
+      PrintF("StoreStoreElimination::ReduceEligibleNode: " fmt "\n", \
+             ##__VA_ARGS__);                                         \
+    }                                                                \
+  } while (false)
+
+// A simple store-store elimination. When the effect chain contains the
+// following sequence,
+//
+// - StoreField[[+off_1]](x1, y1)
+// - StoreField[[+off_2]](x2, y2)
+// - StoreField[[+off_3]](x3, y3)
+//   ...
+// - StoreField[[+off_n]](xn, yn)
+//
+// where the xes are the objects and the ys are the values to be stored, then
+// we are going to say that a store is superfluous if the same offset of the
+// same object will be stored to in the future. If off_i == off_j and xi == xj
+// and i < j, then we optimize the i'th StoreField away.
+//
+// This optimization should be initiated on the last StoreField in such a
+// sequence.
+//
+// The algorithm works by walking the effect chain from the last StoreField
+// upwards. While walking, we maintain a map {futureStore} from offsets to
+// nodes; initially it is empty. As we walk the effect chain upwards, if
+// futureStore[off] = n, then any store to node {n} with offset {off} is
+// guaranteed to be useless because we do a full-width[1] store to that offset
+// of that object in the near future anyway. For example, for this effect
+// chain
+//
+// 71: StoreField(60, 0)
+// 72: StoreField(65, 8)
+// 73: StoreField(63, 8)
+// 74: StoreField(65, 16)
+// 75: StoreField(62, 8)
+//
+// just before we get to 72, we will have futureStore = {8: 63, 16: 65}.
+//
+// Here is the complete process.
+//
+// - We are at the end of a sequence of consecutive StoreFields.
+// - We start out with futureStore = empty.
+// - We then walk the effect chain upwards to find the next StoreField [2].
+//
+//   1. If the offset is not a key of {futureStore} yet, we put it in.
+//   2. If the offset is a key of {futureStore}, but futureStore[offset] is a
+//      different node, we overwrite futureStore[offset] with the current node.
+//   3. If the offset is a key of {futureStore} and futureStore[offset] equals
+//      this node, we eliminate this StoreField.
+//
+//   As long as the current effect input points to a node with a single effect
+//   output, and as long as its opcode is StoreField, we keep traversing
+//   upwards.
+//
+// [1] This optimization is unsound if we optimize away a store to an offset
+//   because we store to the same offset in the future, even though the future
+//   store is narrower than the store we optimize away. Therefore, in case (1)
+//   and (2) we only add/overwrite to the dictionary when the field access has
+//   maximal size. For simplicity of implementation, we do not try to detect
+//   case (3).
+//
+// [2] We make sure that we only traverse the linear part, that is, the part
+//   where every node has exactly one incoming and one outgoing effect edge.
+//   Also, we only keep walking upwards as long as we keep finding consecutive
+//   StoreFields on the same node.
+
+StoreStoreElimination::StoreStoreElimination(JSGraph* js_graph, Zone* temp_zone)
+    : jsgraph_(js_graph), temp_zone_(temp_zone) {}
+
+StoreStoreElimination::~StoreStoreElimination() {}
+
+void StoreStoreElimination::Run() {
+  // The store-store elimination performs work on chains of certain types of
+  // nodes. The elimination must be invoked on the lowest node in such a
+  // chain; we have a helper function IsEligibleNode that returns true
+  // precisely on the lowest node in such a chain.
+  //
+  // Because the elimination removes nodes from the graph, even nodes
+  // that the elimination was not invoked on, we cannot use a normal
+  // AdvancedReducer but we manually find which nodes to invoke the
+  // elimination on. Then in a next step, we invoke the elimination for each
+  // node that was eligible.
+
+  NodeVector eligible(temp_zone());  // loops over all nodes
+  AllNodes all(temp_zone(), jsgraph()->graph());
+
+  for (Node* node : all.live) {
+    if (IsEligibleNode(node)) {
+      eligible.push_back(node);
+    }
+  }
+
+  for (Node* node : eligible) {
+    ReduceEligibleNode(node);
+  }
+}
+
+namespace {
+
+// 16 bits was chosen fairly arbitrarily; it seems enough now. 8 bits is too
+// few.
+typedef uint16_t Offset;
+
+// To safely cast an offset from a FieldAccess, which has a wider range
+// (namely int).
+Offset ToOffset(int offset) {
+  CHECK(0 <= offset && offset < (1 << 8 * sizeof(Offset)));
+  return (Offset)offset;
+}
+
+Offset ToOffset(const FieldAccess& access) { return ToOffset(access.offset); }
+
+// If node has a single effect use, return that node. If node has no or
+// multiple effect uses, return nullptr.
+Node* SingleEffectUse(Node* node) {
+  Node* last_use = nullptr;
+  for (Edge edge : node->use_edges()) {
+    if (!NodeProperties::IsEffectEdge(edge)) {
+      continue;
+    }
+    if (last_use != nullptr) {
+      // more than one
+      return nullptr;
+    }
+    last_use = edge.from();
+    DCHECK_NOT_NULL(last_use);
+  }
+  return last_use;
+}
+
+// Return true if node is the last consecutive StoreField node in a linear
+// part of the effect chain.
+bool IsEndOfStoreFieldChain(Node* node) {
+  Node* next_on_chain = SingleEffectUse(node);
+  return (next_on_chain == nullptr ||
+          next_on_chain->op()->opcode() != IrOpcode::kStoreField);
+}
+
+// The argument must be a StoreField node. If there is a node before it in the
+// effect chain, and if this part of the effect chain is linear (no other
+// effect uses of that previous node), then return that previous node.
+// Otherwise, return nullptr.
+//
+// The returned node need not be a StoreField.
+Node* PreviousEffectBeforeStoreField(Node* node) {
+  DCHECK_EQ(node->op()->opcode(), IrOpcode::kStoreField);
+  DCHECK_EQ(node->op()->EffectInputCount(), 1);
+
+  Node* previous = NodeProperties::GetEffectInput(node);
+  if (previous != nullptr && node == SingleEffectUse(previous)) {
+    return previous;
+  } else {
+    return nullptr;
+  }
+}
+
+size_t rep_size_of(MachineRepresentation rep) {
+  return ((size_t)1) << ElementSizeLog2Of(rep);
+}
+size_t rep_size_of(FieldAccess access) {
+  return rep_size_of(access.machine_type.representation());
+}
+
+}  // namespace
+
+bool StoreStoreElimination::IsEligibleNode(Node* node) {
+  return (node->op()->opcode() == IrOpcode::kStoreField) &&
+         IsEndOfStoreFieldChain(node);
+}
+
+void StoreStoreElimination::ReduceEligibleNode(Node* node) {
+  DCHECK(IsEligibleNode(node));
+
+  // if (FLAG_trace_store_elimination) {
+  //   PrintF("** StoreStoreElimination::ReduceEligibleNode: activated:
+  //   #%d\n",
+  //          node->id());
+  // }
+
+  TRACE("activated: #%d", node->id());
+
+  // Initialize empty futureStore.
+  ZoneMap<Offset, Node*> futureStore(temp_zone());
+
+  Node* current_node = node;
+
+  do {
+    FieldAccess access = OpParameter<FieldAccess>(current_node->op());
+    Offset offset = ToOffset(access);
+    Node* object_input = current_node->InputAt(0);
+
+    Node* previous = PreviousEffectBeforeStoreField(current_node);
+
+    CHECK(rep_size_of(access) <= rep_size_of(MachineRepresentation::kTagged));
+    if (rep_size_of(access) == rep_size_of(MachineRepresentation::kTagged)) {
+      // Try to insert. If it was present, this will preserve the original
+      // value.
+      auto insert_result =
+          futureStore.insert(std::make_pair(offset, object_input));
+      if (insert_result.second) {
+        // Key was not present. This means that there is no matching
+        // StoreField to this offset in the future, so we cannot optimize
+        // current_node away. However, we will record the current StoreField
+        // in futureStore, and continue ascending up the chain.
+        TRACE("#%d[[+%d]] -- wide, key not present", current_node->id(),
+              offset);
+      } else if (insert_result.first->second != object_input) {
+        // Key was present, and the value did not equal object_input. This
+        // means that there is a StoreField to this offset in the future,
+        // but the
+        // object instance comes from a different Node. We pessimistically
+        // assume that we cannot optimize current_node away. However, we will
+        // record the current StoreField in futureStore, and continue
+        // ascending up the chain.
+        insert_result.first->second = object_input;
+        TRACE("#%d[[+%d]] -- wide, diff object", current_node->id(), offset);
+      } else {
+        // Key was present, and the value equalled object_input. This means
+        // that soon after in the effect chain, we will do a StoreField to the
+        // same object with the same offset, therefore current_node can be
+        // optimized away. We don't need to update futureStore.
+
+        Node* previous_effect = NodeProperties::GetEffectInput(current_node);
+
+        NodeProperties::ReplaceUses(current_node, nullptr, previous_effect,
+                                    nullptr, nullptr);
+        current_node->Kill();
+        TRACE("#%d[[+%d]] -- wide, eliminated", current_node->id(), offset);
+      }
+    } else {
+      TRACE("#%d[[+%d]] -- narrow, not eliminated", current_node->id(), offset);
+    }
+
+    // Regardless of whether we eliminated node {current}, we want to
+    // continue walking up the effect chain.
+
+    current_node = previous;
+  } while (current_node != nullptr &&
+           current_node->op()->opcode() == IrOpcode::kStoreField);
+
+  TRACE("finished");
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/store-store-elimination.h b/src/compiler/store-store-elimination.h
new file mode 100644
index 0000000..1c9ae3d
--- /dev/null
+++ b/src/compiler/store-store-elimination.h
@@ -0,0 +1,40 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_STORE_STORE_ELIMINATION_H_
+#define V8_COMPILER_STORE_STORE_ELIMINATION_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+
+class StoreStoreElimination final {
+ public:
+  StoreStoreElimination(JSGraph* js_graph, Zone* temp_zone);
+  ~StoreStoreElimination();
+  void Run();
+
+ private:
+  static bool IsEligibleNode(Node* node);
+  void ReduceEligibleNode(Node* node);
+  JSGraph* jsgraph() const { return jsgraph_; }
+  Zone* temp_zone() const { return temp_zone_; }
+
+  JSGraph* const jsgraph_;
+  Zone* const temp_zone_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_STORE_STORE_ELIMINATION_H_
diff --git a/src/compiler/type-hint-analyzer.cc b/src/compiler/type-hint-analyzer.cc
index da4f268..791aa9d 100644
--- a/src/compiler/type-hint-analyzer.cc
+++ b/src/compiler/type-hint-analyzer.cc
@@ -16,17 +16,43 @@
 namespace {
 
 // TODO(bmeurer): This detour via types is ugly.
-BinaryOperationHints::Hint ToHint(Type* type) {
+BinaryOperationHints::Hint ToBinaryOperationHint(Type* type) {
   if (type->Is(Type::None())) return BinaryOperationHints::kNone;
   if (type->Is(Type::SignedSmall())) return BinaryOperationHints::kSignedSmall;
   if (type->Is(Type::Signed32())) return BinaryOperationHints::kSigned32;
-  if (type->Is(Type::Number())) return BinaryOperationHints::kNumber;
+  if (type->Is(Type::Number())) return BinaryOperationHints::kNumberOrUndefined;
   if (type->Is(Type::String())) return BinaryOperationHints::kString;
   return BinaryOperationHints::kAny;
 }
 
-}  // namespace
+CompareOperationHints::Hint ToCompareOperationHint(
+    CompareICState::State state) {
+  switch (state) {
+    case CompareICState::UNINITIALIZED:
+      return CompareOperationHints::kNone;
+    case CompareICState::BOOLEAN:
+      return CompareOperationHints::kBoolean;
+    case CompareICState::SMI:
+      return CompareOperationHints::kSignedSmall;
+    case CompareICState::NUMBER:
+      return CompareOperationHints::kNumber;
+    case CompareICState::STRING:
+      return CompareOperationHints::kString;
+    case CompareICState::INTERNALIZED_STRING:
+      return CompareOperationHints::kInternalizedString;
+    case CompareICState::UNIQUE_NAME:
+      return CompareOperationHints::kUniqueName;
+    case CompareICState::RECEIVER:
+    case CompareICState::KNOWN_RECEIVER:
+      return CompareOperationHints::kReceiver;
+    case CompareICState::GENERIC:
+      return CompareOperationHints::kAny;
+  }
+  UNREACHABLE();
+  return CompareOperationHints::kAny;
+}
 
+}  // namespace
 
 bool TypeHintAnalysis::GetBinaryOperationHints(
     TypeFeedbackId id, BinaryOperationHints* hints) const {
@@ -35,12 +61,29 @@
   Handle<Code> code = i->second;
   DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
   BinaryOpICState state(code->GetIsolate(), code->extra_ic_state());
-  *hints = BinaryOperationHints(ToHint(state.GetLeftType()),
-                                ToHint(state.GetRightType()),
-                                ToHint(state.GetResultType()));
+  *hints = BinaryOperationHints(ToBinaryOperationHint(state.GetLeftType()),
+                                ToBinaryOperationHint(state.GetRightType()),
+                                ToBinaryOperationHint(state.GetResultType()));
   return true;
 }
 
+bool TypeHintAnalysis::GetCompareOperationHints(
+    TypeFeedbackId id, CompareOperationHints* hints) const {
+  auto i = infos_.find(id);
+  if (i == infos_.end()) return false;
+  Handle<Code> code = i->second;
+  DCHECK_EQ(Code::COMPARE_IC, code->kind());
+
+  Handle<Map> map;
+  Map* raw_map = code->FindFirstMap();
+  if (raw_map != nullptr) Map::TryUpdate(handle(raw_map)).ToHandle(&map);
+
+  CompareICStub stub(code->stub_key(), code->GetIsolate());
+  *hints = CompareOperationHints(ToCompareOperationHint(stub.left()),
+                                 ToCompareOperationHint(stub.right()),
+                                 ToCompareOperationHint(stub.state()));
+  return true;
+}
 
 bool TypeHintAnalysis::GetToBooleanHints(TypeFeedbackId id,
                                          ToBooleanHints* hints) const {
@@ -67,7 +110,6 @@
   return true;
 }
 
-
 TypeHintAnalysis* TypeHintAnalyzer::Analyze(Handle<Code> code) {
   DisallowHeapAllocation no_gc;
   TypeHintAnalysis::Infos infos(zone());
@@ -79,6 +121,7 @@
     Code* target = Code::GetCodeFromTargetAddress(target_address);
     switch (target->kind()) {
       case Code::BINARY_OP_IC:
+      case Code::COMPARE_IC:
       case Code::TO_BOOLEAN_IC: {
         // Add this feedback to the {infos}.
         TypeFeedbackId id(static_cast<unsigned>(rinfo->data()));
@@ -90,7 +133,7 @@
         break;
     }
   }
-  return new (zone()) TypeHintAnalysis(infos);
+  return new (zone()) TypeHintAnalysis(infos, zone());
 }
 
 }  // namespace compiler
diff --git a/src/compiler/type-hint-analyzer.h b/src/compiler/type-hint-analyzer.h
index 1a79905..bfb6232 100644
--- a/src/compiler/type-hint-analyzer.h
+++ b/src/compiler/type-hint-analyzer.h
@@ -18,14 +18,20 @@
  public:
   typedef ZoneMap<TypeFeedbackId, Handle<Code>> Infos;
 
-  explicit TypeHintAnalysis(Infos const& infos) : infos_(infos) {}
+  explicit TypeHintAnalysis(Infos const& infos, Zone* zone)
+      : infos_(infos), zone_(zone) {}
 
   bool GetBinaryOperationHints(TypeFeedbackId id,
                                BinaryOperationHints* hints) const;
+  bool GetCompareOperationHints(TypeFeedbackId id,
+                                CompareOperationHints* hints) const;
   bool GetToBooleanHints(TypeFeedbackId id, ToBooleanHints* hints) const;
 
  private:
+  Zone* zone() const { return zone_; }
+
   Infos const infos_;
+  Zone* zone_;
 };
 
 
diff --git a/src/compiler/type-hints.cc b/src/compiler/type-hints.cc
index 06abad6..e608832 100644
--- a/src/compiler/type-hints.cc
+++ b/src/compiler/type-hints.cc
@@ -16,8 +16,8 @@
       return os << "SignedSmall";
     case BinaryOperationHints::kSigned32:
       return os << "Signed32";
-    case BinaryOperationHints::kNumber:
-      return os << "Number";
+    case BinaryOperationHints::kNumberOrUndefined:
+      return os << "NumberOrUndefined";
     case BinaryOperationHints::kString:
       return os << "String";
     case BinaryOperationHints::kAny:
@@ -27,11 +27,39 @@
   return os;
 }
 
-
 std::ostream& operator<<(std::ostream& os, BinaryOperationHints hints) {
   return os << hints.left() << "*" << hints.right() << "->" << hints.result();
 }
 
+std::ostream& operator<<(std::ostream& os, CompareOperationHints::Hint hint) {
+  switch (hint) {
+    case CompareOperationHints::kNone:
+      return os << "None";
+    case CompareOperationHints::kBoolean:
+      return os << "Boolean";
+    case CompareOperationHints::kSignedSmall:
+      return os << "SignedSmall";
+    case CompareOperationHints::kNumber:
+      return os << "Number";
+    case CompareOperationHints::kString:
+      return os << "String";
+    case CompareOperationHints::kInternalizedString:
+      return os << "InternalizedString";
+    case CompareOperationHints::kUniqueName:
+      return os << "UniqueName";
+    case CompareOperationHints::kReceiver:
+      return os << "Receiver";
+    case CompareOperationHints::kAny:
+      return os << "Any";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os, CompareOperationHints hints) {
+  return os << hints.left() << "*" << hints.right() << " (" << hints.combined()
+            << ")";
+}
 
 std::ostream& operator<<(std::ostream& os, ToBooleanHint hint) {
   switch (hint) {
@@ -62,7 +90,6 @@
   return os;
 }
 
-
 std::ostream& operator<<(std::ostream& os, ToBooleanHints hints) {
   if (hints == ToBooleanHint::kAny) return os << "Any";
   if (hints == ToBooleanHint::kNone) return os << "None";
@@ -78,6 +105,34 @@
   return os;
 }
 
+// static
+bool BinaryOperationHints::Is(Hint h1, Hint h2) {
+  if (h1 == h2) return true;
+  switch (h1) {
+    case kNone:
+      return true;
+    case kSignedSmall:
+      return h2 == kSigned32 || h2 == kNumberOrUndefined || h2 == kAny;
+    case kSigned32:
+      return h2 == kNumberOrUndefined || h2 == kAny;
+    case kNumberOrUndefined:
+      return h2 == kAny;
+    case kString:
+      return h2 == kAny;
+    case kAny:
+      return false;
+  }
+  UNREACHABLE();
+  return false;
+}
+
+// static
+BinaryOperationHints::Hint BinaryOperationHints::Combine(Hint h1, Hint h2) {
+  if (Is(h1, h2)) return h2;
+  if (Is(h2, h1)) return h1;
+  return kAny;
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/type-hints.h b/src/compiler/type-hints.h
index f1cc640..7c9badd 100644
--- a/src/compiler/type-hints.h
+++ b/src/compiler/type-hints.h
@@ -15,7 +15,14 @@
 // Type hints for an binary operation.
 class BinaryOperationHints final {
  public:
-  enum Hint { kNone, kSignedSmall, kSigned32, kNumber, kString, kAny };
+  enum Hint {
+    kNone,
+    kSignedSmall,
+    kSigned32,
+    kNumberOrUndefined,
+    kString,
+    kAny
+  };
 
   BinaryOperationHints() : BinaryOperationHints(kNone, kNone, kNone) {}
   BinaryOperationHints(Hint left, Hint right, Hint result)
@@ -29,6 +36,11 @@
   Hint left() const { return LeftField::decode(bit_field_); }
   Hint right() const { return RightField::decode(bit_field_); }
   Hint result() const { return ResultField::decode(bit_field_); }
+  Hint combined() const { return Combine(Combine(left(), right()), result()); }
+
+  // Hint 'subtyping' and generalization.
+  static bool Is(Hint h1, Hint h2);
+  static Hint Combine(Hint h1, Hint h2);
 
   bool operator==(BinaryOperationHints const& that) const {
     return this->bit_field_ == that.bit_field_;
@@ -52,6 +64,55 @@
 std::ostream& operator<<(std::ostream&, BinaryOperationHints::Hint);
 std::ostream& operator<<(std::ostream&, BinaryOperationHints);
 
+// Type hints for a compare operation.
+class CompareOperationHints final {
+ public:
+  enum Hint {
+    kNone,
+    kBoolean,
+    kSignedSmall,
+    kNumber,
+    kString,
+    kInternalizedString,
+    kUniqueName,
+    kReceiver,
+    kAny
+  };
+
+  CompareOperationHints() : CompareOperationHints(kNone, kNone, kNone) {}
+  CompareOperationHints(Hint left, Hint right, Hint combined)
+      : bit_field_(LeftField::encode(left) | RightField::encode(right) |
+                   CombinedField::encode(combined)) {}
+
+  static CompareOperationHints Any() {
+    return CompareOperationHints(kAny, kAny, kAny);
+  }
+
+  Hint left() const { return LeftField::decode(bit_field_); }
+  Hint right() const { return RightField::decode(bit_field_); }
+  Hint combined() const { return CombinedField::decode(bit_field_); }
+
+  bool operator==(CompareOperationHints const& that) const {
+    return this->bit_field_ == that.bit_field_;
+  }
+  bool operator!=(CompareOperationHints const& that) const {
+    return !(*this == that);
+  }
+
+  friend size_t hash_value(CompareOperationHints const& hints) {
+    return hints.bit_field_;
+  }
+
+ private:
+  typedef BitField<Hint, 0, 4> LeftField;
+  typedef BitField<Hint, 4, 4> RightField;
+  typedef BitField<Hint, 8, 4> CombinedField;
+
+  uint32_t bit_field_;
+};
+
+std::ostream& operator<<(std::ostream&, CompareOperationHints::Hint);
+std::ostream& operator<<(std::ostream&, CompareOperationHints);
 
 // Type hints for the ToBoolean type conversion.
 enum class ToBooleanHint : uint16_t {
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
index d98d2fe..2bc0bb3 100644
--- a/src/compiler/typer.cc
+++ b/src/compiler/typer.cc
@@ -10,8 +10,9 @@
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/compiler/js-operator.h"
-#include "src/compiler/node.h"
 #include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operation-typer.h"
 #include "src/compiler/simplified-operator.h"
 #include "src/objects-inl.h"
 #include "src/type-cache.h"
@@ -37,14 +38,15 @@
       dependencies_(dependencies),
       function_type_(function_type),
       decorator_(nullptr),
-      cache_(TypeCache::Get()) {
+      cache_(TypeCache::Get()),
+      operation_typer_(isolate, zone()) {
   Zone* zone = this->zone();
   Factory* const factory = isolate->factory();
 
   Type* infinity = Type::Constant(factory->infinity_value(), zone);
   Type* minus_infinity = Type::Constant(factory->minus_infinity_value(), zone);
-  // TODO(neis): Unfortunately, the infinities created in other places might
-  // be different ones (eg the result of NewNumber in TypeNumberConstant).
+  // Unfortunately, the infinities created in other places might be different
+  // ones (eg the result of NewNumber in TypeNumberConstant).
   Type* truncating_to_zero =
       Type::Union(Type::Union(infinity, minus_infinity, zone),
                   Type::MinusZeroOrNaN(), zone);
@@ -232,7 +234,6 @@
   static ComparisonOutcome Invert(ComparisonOutcome, Typer*);
   static Type* Invert(Type*, Typer*);
   static Type* FalsifyUndefined(ComparisonOutcome, Typer*);
-  static Type* Rangify(Type*, Typer*);
 
   static Type* ToPrimitive(Type*, Typer*);
   static Type* ToBoolean(Type*, Typer*);
@@ -242,6 +243,7 @@
   static Type* ToNumber(Type*, Typer*);
   static Type* ToObject(Type*, Typer*);
   static Type* ToString(Type*, Typer*);
+  static Type* NumberAbs(Type*, Typer*);
   static Type* NumberCeil(Type*, Typer*);
   static Type* NumberFloor(Type*, Typer*);
   static Type* NumberRound(Type*, Typer*);
@@ -256,11 +258,6 @@
   static Type* ObjectIsString(Type*, Typer*);
   static Type* ObjectIsUndetectable(Type*, Typer*);
 
-  static Type* JSAddRanger(RangeType*, RangeType*, Typer*);
-  static Type* JSSubtractRanger(RangeType*, RangeType*, Typer*);
-  static Type* JSDivideRanger(RangeType*, RangeType*, Typer*);
-  static Type* JSModulusRanger(RangeType*, RangeType*, Typer*);
-
   static ComparisonOutcome JSCompareTyper(Type*, Type*, Typer*);
 
 #define DECLARE_METHOD(x) static Type* x##Typer(Type*, Type*, Typer*);
@@ -272,6 +269,7 @@
   static Type* JSCallFunctionTyper(Type*, Typer*);
 
   static Type* ReferenceEqualTyper(Type*, Type*, Typer*);
+  static Type* StringFromCharCodeTyper(Type*, Typer*);
 
   Reduction UpdateType(Node* node, Type* current) {
     if (NodeProperties::IsTyped(node)) {
@@ -380,27 +378,8 @@
   return t->singleton_true_;
 }
 
-
-Type* Typer::Visitor::Rangify(Type* type, Typer* t) {
-  if (type->IsRange()) return type;        // Shortcut.
-  if (!type->Is(t->cache_.kInteger)) {
-    return type;  // Give up on non-integer types.
-  }
-  double min = type->Min();
-  double max = type->Max();
-  // Handle the degenerate case of empty bitset types (such as
-  // OtherUnsigned31 and OtherSigned32 on 64-bit architectures).
-  if (std::isnan(min)) {
-    DCHECK(std::isnan(max));
-    return type;
-  }
-  return Type::Range(min, max, t->zone());
-}
-
-
 // Type conversion.
 
-
 Type* Typer::Visitor::ToPrimitive(Type* type, Typer* t) {
   if (type->Is(Type::Primitive()) && !type->Maybe(Type::Receiver())) {
     return type;
@@ -501,6 +480,34 @@
 }
 
 // static
+Type* Typer::Visitor::NumberAbs(Type* type, Typer* t) {
+  DCHECK(type->Is(Type::Number()));
+  Factory* const f = t->isolate()->factory();
+  bool const maybe_nan = type->Maybe(Type::NaN());
+  bool const maybe_minuszero = type->Maybe(Type::MinusZero());
+  type = Type::Intersect(type, Type::PlainNumber(), t->zone());
+  double const max = type->Max();
+  double const min = type->Min();
+  if (min < 0) {
+    if (type->Is(t->cache_.kInteger)) {
+      type =
+          Type::Range(0.0, std::max(std::fabs(min), std::fabs(max)), t->zone());
+    } else if (min == max) {
+      type = Type::Constant(f->NewNumber(std::fabs(min)), t->zone());
+    } else {
+      type = Type::PlainNumber();
+    }
+  }
+  if (maybe_minuszero) {
+    type = Type::Union(type, t->cache_.kSingletonZero, t->zone());
+  }
+  if (maybe_nan) {
+    type = Type::Union(type, Type::NaN(), t->zone());
+  }
+  return type;
+}
+
+// static
 Type* Typer::Visitor::NumberCeil(Type* type, Typer* t) {
   DCHECK(type->Is(Type::Number()));
   if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
@@ -533,7 +540,6 @@
 }
 
 Type* Typer::Visitor::NumberToInt32(Type* type, Typer* t) {
-  // TODO(neis): DCHECK(type->Is(Type::Number()));
   if (type->Is(Type::Signed32())) return type;
   if (type->Is(t->cache_.kZeroish)) return t->cache_.kSingletonZero;
   if (type->Is(t->signed32ish_)) {
@@ -546,7 +552,6 @@
 
 
 Type* Typer::Visitor::NumberToUint32(Type* type, Typer* t) {
-  // TODO(neis): DCHECK(type->Is(Type::Number()));
   if (type->Is(Type::Unsigned32())) return type;
   if (type->Is(t->cache_.kZeroish)) return t->cache_.kSingletonZero;
   if (type->Is(t->unsigned32ish_)) {
@@ -557,7 +562,6 @@
   return Type::Unsigned32();
 }
 
-
 // Type checks.
 
 Type* Typer::Visitor::ObjectIsCallable(Type* type, Typer* t) {
@@ -705,7 +709,7 @@
   return Type::Intersect(input_type, guard_type, zone());
 }
 
-Type* Typer::Visitor::TypeCheckPoint(Node* node) {
+Type* Typer::Visitor::TypeCheckpoint(Node* node) {
   UNREACHABLE();
   return nullptr;
 }
@@ -765,7 +769,6 @@
   if (lhs->IsConstant() && rhs->Is(lhs)) {
     // Types are equal and are inhabited only by a single semantic value,
     // which is not nan due to the earlier check.
-    // TODO(neis): Extend this to Range(x,x), MinusZero, ...?
     return t->singleton_true_;
   }
   return Type::Boolean();
@@ -876,7 +879,6 @@
   return FalsifyUndefined(Invert(JSCompareTyper(lhs, rhs, t), t), t);
 }
 
-
 // JS bitwise operators.
 
 
@@ -909,7 +911,6 @@
     max = std::min(max, -1.0);
   }
   return Type::Range(min, max, t->zone());
-  // TODO(neis): Be precise for singleton inputs, here and elsewhere.
 }
 
 
@@ -1010,64 +1011,6 @@
 
 // JS arithmetic operators.
 
-
-// Returns the array's least element, ignoring NaN.
-// There must be at least one non-NaN element.
-// Any -0 is converted to 0.
-static double array_min(double a[], size_t n) {
-  DCHECK(n != 0);
-  double x = +V8_INFINITY;
-  for (size_t i = 0; i < n; ++i) {
-    if (!std::isnan(a[i])) {
-      x = std::min(a[i], x);
-    }
-  }
-  DCHECK(!std::isnan(x));
-  return x == 0 ? 0 : x;  // -0 -> 0
-}
-
-
-// Returns the array's greatest element, ignoring NaN.
-// There must be at least one non-NaN element.
-// Any -0 is converted to 0.
-static double array_max(double a[], size_t n) {
-  DCHECK(n != 0);
-  double x = -V8_INFINITY;
-  for (size_t i = 0; i < n; ++i) {
-    if (!std::isnan(a[i])) {
-      x = std::max(a[i], x);
-    }
-  }
-  DCHECK(!std::isnan(x));
-  return x == 0 ? 0 : x;  // -0 -> 0
-}
-
-Type* Typer::Visitor::JSAddRanger(RangeType* lhs, RangeType* rhs, Typer* t) {
-  double results[4];
-  results[0] = lhs->Min() + rhs->Min();
-  results[1] = lhs->Min() + rhs->Max();
-  results[2] = lhs->Max() + rhs->Min();
-  results[3] = lhs->Max() + rhs->Max();
-  // Since none of the inputs can be -0, the result cannot be -0 either.
-  // However, it can be nan (the sum of two infinities of opposite sign).
-  // On the other hand, if none of the "results" above is nan, then the actual
-  // result cannot be nan either.
-  int nans = 0;
-  for (int i = 0; i < 4; ++i) {
-    if (std::isnan(results[i])) ++nans;
-  }
-  if (nans == 4) return Type::NaN();  // [-inf..-inf] + [inf..inf] or vice versa
-  Type* range =
-      Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
-  return nans == 0 ? range : Type::Union(range, Type::NaN(), t->zone());
-  // Examples:
-  //   [-inf, -inf] + [+inf, +inf] = NaN
-  //   [-inf, -inf] + [n, +inf] = [-inf, -inf] \/ NaN
-  //   [-inf, +inf] + [n, +inf] = [-inf, +inf] \/ NaN
-  //   [-inf, m] + [n, +inf] = [-inf, +inf] \/ NaN
-}
-
-
 Type* Typer::Visitor::JSAddTyper(Type* lhs, Type* rhs, Typer* t) {
   lhs = ToPrimitive(lhs, t);
   rhs = ToPrimitive(rhs, t);
@@ -1078,97 +1021,27 @@
       return Type::NumberOrString();
     }
   }
-  lhs = Rangify(ToNumber(lhs, t), t);
-  rhs = Rangify(ToNumber(rhs, t), t);
-  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
-  if (lhs->IsRange() && rhs->IsRange()) {
-    return JSAddRanger(lhs->AsRange(), rhs->AsRange(), t);
-  }
-  // TODO(neis): Deal with numeric bitsets here and elsewhere.
-  return Type::Number();
+  // The addition must be numeric.
+  return t->operation_typer()->NumericAdd(ToNumber(lhs, t), ToNumber(rhs, t));
 }
 
-Type* Typer::Visitor::JSSubtractRanger(RangeType* lhs, RangeType* rhs,
-                                       Typer* t) {
-  double results[4];
-  results[0] = lhs->Min() - rhs->Min();
-  results[1] = lhs->Min() - rhs->Max();
-  results[2] = lhs->Max() - rhs->Min();
-  results[3] = lhs->Max() - rhs->Max();
-  // Since none of the inputs can be -0, the result cannot be -0.
-  // However, it can be nan (the subtraction of two infinities of same sign).
-  // On the other hand, if none of the "results" above is nan, then the actual
-  // result cannot be nan either.
-  int nans = 0;
-  for (int i = 0; i < 4; ++i) {
-    if (std::isnan(results[i])) ++nans;
-  }
-  if (nans == 4) return Type::NaN();  // [inf..inf] - [inf..inf] (all same sign)
-  Type* range =
-      Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
-  return nans == 0 ? range : Type::Union(range, Type::NaN(), t->zone());
-  // Examples:
-  //   [-inf, +inf] - [-inf, +inf] = [-inf, +inf] \/ NaN
-  //   [-inf, -inf] - [-inf, -inf] = NaN
-  //   [-inf, -inf] - [n, +inf] = [-inf, -inf] \/ NaN
-  //   [m, +inf] - [-inf, n] = [-inf, +inf] \/ NaN
-}
-
-
 Type* Typer::Visitor::JSSubtractTyper(Type* lhs, Type* rhs, Typer* t) {
-  lhs = Rangify(ToNumber(lhs, t), t);
-  rhs = Rangify(ToNumber(rhs, t), t);
-  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
-  if (lhs->IsRange() && rhs->IsRange()) {
-    return JSSubtractRanger(lhs->AsRange(), rhs->AsRange(), t);
-  }
-  return Type::Number();
+  return t->operation_typer()->NumericSubtract(ToNumber(lhs, t),
+                                               ToNumber(rhs, t));
 }
 
-
 Type* Typer::Visitor::JSMultiplyTyper(Type* lhs, Type* rhs, Typer* t) {
-  lhs = Rangify(ToNumber(lhs, t), t);
-  rhs = Rangify(ToNumber(rhs, t), t);
-  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
-  if (lhs->IsRange() && rhs->IsRange()) {
-    double results[4];
-    double lmin = lhs->AsRange()->Min();
-    double lmax = lhs->AsRange()->Max();
-    double rmin = rhs->AsRange()->Min();
-    double rmax = rhs->AsRange()->Max();
-    results[0] = lmin * rmin;
-    results[1] = lmin * rmax;
-    results[2] = lmax * rmin;
-    results[3] = lmax * rmax;
-    // If the result may be nan, we give up on calculating a precise type,
-    // because
-    // the discontinuity makes it too complicated.  Note that even if none of
-    // the
-    // "results" above is nan, the actual result may still be, so we have to do
-    // a
-    // different check:
-    bool maybe_nan = (lhs->Maybe(t->cache_.kSingletonZero) &&
-                      (rmin == -V8_INFINITY || rmax == +V8_INFINITY)) ||
-                     (rhs->Maybe(t->cache_.kSingletonZero) &&
-                      (lmin == -V8_INFINITY || lmax == +V8_INFINITY));
-    if (maybe_nan) return t->cache_.kIntegerOrMinusZeroOrNaN;  // Giving up.
-    bool maybe_minuszero = (lhs->Maybe(t->cache_.kSingletonZero) && rmin < 0) ||
-                           (rhs->Maybe(t->cache_.kSingletonZero) && lmin < 0);
-    Type* range =
-        Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
-    return maybe_minuszero ? Type::Union(range, Type::MinusZero(), t->zone())
-                           : range;
-  }
-  return Type::Number();
+  return t->operation_typer()->NumericMultiply(ToNumber(lhs, t),
+                                               ToNumber(rhs, t));
 }
 
-
 Type* Typer::Visitor::JSDivideTyper(Type* lhs, Type* rhs, Typer* t) {
+  return t->operation_typer()->NumericDivide(ToNumber(lhs, t),
+                                             ToNumber(rhs, t));
   lhs = ToNumber(lhs, t);
   rhs = ToNumber(rhs, t);
   if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
   // Division is tricky, so all we do is try ruling out nan.
-  // TODO(neis): try ruling out -0 as well?
   bool maybe_nan =
       lhs->Maybe(Type::NaN()) || rhs->Maybe(t->cache_.kZeroish) ||
       ((lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) &&
@@ -1176,56 +1049,9 @@
   return maybe_nan ? Type::Number() : Type::OrderedNumber();
 }
 
-Type* Typer::Visitor::JSModulusRanger(RangeType* lhs, RangeType* rhs,
-                                      Typer* t) {
-  double lmin = lhs->Min();
-  double lmax = lhs->Max();
-  double rmin = rhs->Min();
-  double rmax = rhs->Max();
-
-  double labs = std::max(std::abs(lmin), std::abs(lmax));
-  double rabs = std::max(std::abs(rmin), std::abs(rmax)) - 1;
-  double abs = std::min(labs, rabs);
-  bool maybe_minus_zero = false;
-  double omin = 0;
-  double omax = 0;
-  if (lmin >= 0) {  // {lhs} positive.
-    omin = 0;
-    omax = abs;
-  } else if (lmax <= 0) {  // {lhs} negative.
-    omin = 0 - abs;
-    omax = 0;
-    maybe_minus_zero = true;
-  } else {
-    omin = 0 - abs;
-    omax = abs;
-    maybe_minus_zero = true;
-  }
-
-  Type* result = Type::Range(omin, omax, t->zone());
-  if (maybe_minus_zero)
-    result = Type::Union(result, Type::MinusZero(), t->zone());
-  return result;
-}
-
-
 Type* Typer::Visitor::JSModulusTyper(Type* lhs, Type* rhs, Typer* t) {
-  lhs = ToNumber(lhs, t);
-  rhs = ToNumber(rhs, t);
-  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
-
-  if (lhs->Maybe(Type::NaN()) || rhs->Maybe(t->cache_.kZeroish) ||
-      lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) {
-    // Result maybe NaN.
-    return Type::Number();
-  }
-
-  lhs = Rangify(lhs, t);
-  rhs = Rangify(rhs, t);
-  if (lhs->IsRange() && rhs->IsRange()) {
-    return JSModulusRanger(lhs->AsRange(), rhs->AsRange(), t);
-  }
-  return Type::OrderedNumber();
+  return t->operation_typer()->NumericModulus(ToNumber(lhs, t),
+                                              ToNumber(rhs, t));
 }
 
 
@@ -1550,9 +1376,10 @@
         case kMathTrunc:
           return t->cache_.kIntegerOrMinusZeroOrNaN;
         // Unary math functions.
+        case kMathExp:
+          return Type::Union(Type::PlainNumber(), Type::NaN(), t->zone());
         case kMathAbs:
         case kMathLog:
-        case kMathExp:
         case kMathSqrt:
         case kMathCos:
         case kMathSin:
@@ -1616,9 +1443,6 @@
     case Runtime::kInlineDoubleLo:
     case Runtime::kInlineDoubleHi:
       return Type::Signed32();
-    case Runtime::kInlineConstructDouble:
-    case Runtime::kInlineMathAtan2:
-      return Type::Number();
     case Runtime::kInlineCreateIterResultObject:
     case Runtime::kInlineRegExpConstructResult:
       return Type::OtherObject();
@@ -1686,10 +1510,21 @@
   return nullptr;
 }
 
+Type* Typer::Visitor::TypeJSGeneratorStore(Node* node) {
+  UNREACHABLE();
+  return nullptr;
+}
+
+Type* Typer::Visitor::TypeJSGeneratorRestoreContinuation(Node* node) {
+  return typer_->cache_.kSmi;
+}
+
+Type* Typer::Visitor::TypeJSGeneratorRestoreRegister(Node* node) {
+  return Type::Any();
+}
 
 Type* Typer::Visitor::TypeJSStackCheck(Node* node) { return Type::Any(); }
 
-
 // Simplified operators.
 
 Type* Typer::Visitor::TypeBooleanNot(Node* node) { return Type::Boolean(); }
@@ -1706,10 +1541,42 @@
   return Type::Boolean();
 }
 
+Type* Typer::Visitor::TypeSpeculativeNumberEqual(Node* node) {
+  return Type::Boolean();
+}
+
+Type* Typer::Visitor::TypeSpeculativeNumberLessThan(Node* node) {
+  return Type::Boolean();
+}
+
+Type* Typer::Visitor::TypeSpeculativeNumberLessThanOrEqual(Node* node) {
+  return Type::Boolean();
+}
+
 Type* Typer::Visitor::TypeNumberAdd(Node* node) { return Type::Number(); }
 
 Type* Typer::Visitor::TypeNumberSubtract(Node* node) { return Type::Number(); }
 
+Type* Typer::Visitor::TypeSpeculativeNumberAdd(Node* node) {
+  return Type::Number();
+}
+
+Type* Typer::Visitor::TypeSpeculativeNumberSubtract(Node* node) {
+  return Type::Number();
+}
+
+Type* Typer::Visitor::TypeSpeculativeNumberMultiply(Node* node) {
+  return Type::Number();
+}
+
+Type* Typer::Visitor::TypeSpeculativeNumberDivide(Node* node) {
+  return Type::Number();
+}
+
+Type* Typer::Visitor::TypeSpeculativeNumberModulus(Node* node) {
+  return Type::Number();
+}
+
 Type* Typer::Visitor::TypeNumberMultiply(Node* node) { return Type::Number(); }
 
 Type* Typer::Visitor::TypeNumberDivide(Node* node) { return Type::Number(); }
@@ -1745,8 +1612,24 @@
   return Type::Unsigned32();
 }
 
+Type* Typer::Visitor::TypePlainPrimitiveToNumber(Node* node) {
+  return TypeUnaryOp(node, ToNumber);
+}
+
+Type* Typer::Visitor::TypePlainPrimitiveToWord32(Node* node) {
+  return Type::Integral32();
+}
+
+Type* Typer::Visitor::TypePlainPrimitiveToFloat64(Node* node) {
+  return Type::Number();
+}
+
 Type* Typer::Visitor::TypeNumberImul(Node* node) { return Type::Signed32(); }
 
+Type* Typer::Visitor::TypeNumberAbs(Node* node) {
+  return TypeUnaryOp(node, NumberAbs);
+}
+
 Type* Typer::Visitor::TypeNumberClz32(Node* node) {
   return typer_->cache_.kZeroToThirtyTwo;
 }
@@ -1759,10 +1642,43 @@
   return TypeUnaryOp(node, NumberFloor);
 }
 
+Type* Typer::Visitor::TypeNumberFround(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberAtan(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberAtan2(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberAtanh(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberCos(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberExp(Node* node) {
+  return Type::Union(Type::PlainNumber(), Type::NaN(), zone());
+}
+
+// TODO(mvstanton): Is this type sufficient, or should it look like Exp()?
+Type* Typer::Visitor::TypeNumberExpm1(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberLog(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberLog1p(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberLog2(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberLog10(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberCbrt(Node* node) { return Type::Number(); }
+
 Type* Typer::Visitor::TypeNumberRound(Node* node) {
   return TypeUnaryOp(node, NumberRound);
 }
 
+Type* Typer::Visitor::TypeNumberSin(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberSqrt(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberTan(Node* node) { return Type::Number(); }
+
 Type* Typer::Visitor::TypeNumberTrunc(Node* node) {
   return TypeUnaryOp(node, NumberTrunc);
 }
@@ -1777,10 +1693,6 @@
 }
 
 
-Type* Typer::Visitor::TypeNumberIsHoleNaN(Node* node) {
-  return Type::Boolean();
-}
-
 // static
 Type* Typer::Visitor::ReferenceEqualTyper(Type* lhs, Type* rhs, Typer* t) {
   if (lhs->IsConstant() && rhs->Is(lhs)) {
@@ -1802,6 +1714,23 @@
   return Type::Boolean();
 }
 
+Type* Typer::Visitor::StringFromCharCodeTyper(Type* type, Typer* t) {
+  type = NumberToUint32(ToNumber(type, t), t);
+  Factory* f = t->isolate()->factory();
+  double min = type->Min();
+  double max = type->Max();
+  if (min == max) {
+    uint32_t code = static_cast<uint32_t>(min) & String::kMaxUtf16CodeUnitU;
+    Handle<String> string = f->LookupSingleCharacterStringFromCode(code);
+    return Type::Constant(string, t->zone());
+  }
+  return Type::String();
+}
+
+Type* Typer::Visitor::TypeStringFromCharCode(Node* node) {
+  return TypeUnaryOp(node, StringFromCharCodeTyper);
+}
+
 Type* Typer::Visitor::TypeStringToNumber(Node* node) {
   return TypeUnaryOp(node, ToNumber);
 }
@@ -1817,33 +1746,45 @@
 
 Type* Typer::Visitor::TypeChangeTaggedSignedToInt32(Node* node) {
   Type* arg = Operand(node, 0);
-  // TODO(neis): DCHECK(arg->Is(Type::Signed32()));
+  // TODO(jarin): DCHECK(arg->Is(Type::Signed32()));
+  // Many tests fail this check.
   return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
 }
 
 Type* Typer::Visitor::TypeChangeTaggedToInt32(Node* node) {
   Type* arg = Operand(node, 0);
-  // TODO(neis): DCHECK(arg->Is(Type::Signed32()));
+  DCHECK(arg->Is(Type::Signed32()));
   return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
 }
 
 
 Type* Typer::Visitor::TypeChangeTaggedToUint32(Node* node) {
   Type* arg = Operand(node, 0);
-  // TODO(neis): DCHECK(arg->Is(Type::Unsigned32()));
+  DCHECK(arg->Is(Type::Unsigned32()));
   return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
 }
 
 
 Type* Typer::Visitor::TypeChangeTaggedToFloat64(Node* node) {
   Type* arg = Operand(node, 0);
-  // TODO(neis): DCHECK(arg->Is(Type::Number()));
+  DCHECK(arg->Is(Type::Number()));
+  return ChangeRepresentation(arg, Type::UntaggedFloat64(), zone());
+}
+
+Type* Typer::Visitor::TypeTruncateTaggedToFloat64(Node* node) {
+  Type* arg = Operand(node, 0);
+  // TODO(jarin) This DCHECK does not work because of speculative feedback.
+  // Re-enable once we record the speculative feedback in types.
+  // DCHECK(arg->Is(Type::NumberOrOddball()));
   return ChangeRepresentation(arg, Type::UntaggedFloat64(), zone());
 }
 
 Type* Typer::Visitor::TypeChangeInt31ToTaggedSigned(Node* node) {
   Type* arg = Operand(node, 0);
-  // TODO(neis): DCHECK(arg->Is(Type::Signed31()));
+  // TODO(jarin): DCHECK(arg->Is(Type::Signed31()));
+  // Some mjsunit/asm and mjsunit/wasm tests fail this check.
+  // For instance, asm/int32-umod fails with Signed32/UntaggedIntegral32 in
+  // simplified-lowering (after propagation).
   Type* rep =
       arg->Is(Type::SignedSmall()) ? Type::TaggedSigned() : Type::Tagged();
   return ChangeRepresentation(arg, rep, zone());
@@ -1851,41 +1792,109 @@
 
 Type* Typer::Visitor::TypeChangeInt32ToTagged(Node* node) {
   Type* arg = Operand(node, 0);
-  // TODO(neis): DCHECK(arg->Is(Type::Signed32()));
+  // TODO(jarin): DCHECK(arg->Is(Type::Signed32()));
+  // Two tests fail this check: mjsunit/asm/sqlite3/sqlite-safe-heap and
+  // mjsunit/wasm/embenchen/lua_binarytrees. The first one fails with Any/Any in
+  // simplified-lowering (after propagation).
   Type* rep =
       arg->Is(Type::SignedSmall()) ? Type::TaggedSigned() : Type::Tagged();
   return ChangeRepresentation(arg, rep, zone());
 }
 
-
 Type* Typer::Visitor::TypeChangeUint32ToTagged(Node* node) {
   Type* arg = Operand(node, 0);
-  // TODO(neis): DCHECK(arg->Is(Type::Unsigned32()));
+  // TODO(jarin): DCHECK(arg->Is(Type::Unsigned32()));
+  // This fails in benchmarks/octane/mandreel (--turbo).
   return ChangeRepresentation(arg, Type::Tagged(), zone());
 }
 
-
 Type* Typer::Visitor::TypeChangeFloat64ToTagged(Node* node) {
   Type* arg = Operand(node, 0);
-  // TODO(neis): CHECK(arg.upper->Is(Type::Number()));
+  // TODO(jarin): DCHECK(arg->Is(Type::Number()));
+  // Some (or all) mjsunit/wasm/embenchen/ tests fail this check when run with
+  // --turbo and --always-opt.
   return ChangeRepresentation(arg, Type::Tagged(), zone());
 }
 
 Type* Typer::Visitor::TypeChangeTaggedToBit(Node* node) {
   Type* arg = Operand(node, 0);
-  // TODO(neis): DCHECK(arg.upper->Is(Type::Boolean()));
+  DCHECK(arg->Is(Type::Boolean()));
   return ChangeRepresentation(arg, Type::UntaggedBit(), zone());
 }
 
 Type* Typer::Visitor::TypeChangeBitToTagged(Node* node) {
   Type* arg = Operand(node, 0);
-  // TODO(neis): DCHECK(arg.upper->Is(Type::Boolean()));
   return ChangeRepresentation(arg, Type::TaggedPointer(), zone());
 }
 
+Type* Typer::Visitor::TypeCheckBounds(Node* node) {
+  // TODO(bmeurer): We could do better here based on the limit.
+  return Type::Unsigned31();
+}
+
+Type* Typer::Visitor::TypeCheckTaggedPointer(Node* node) {
+  Type* arg = Operand(node, 0);
+  return Type::Intersect(arg, Type::TaggedPointer(), zone());
+}
+
+Type* Typer::Visitor::TypeCheckTaggedSigned(Node* node) {
+  Type* arg = Operand(node, 0);
+  return Type::Intersect(arg, typer_->cache_.kSmi, zone());
+}
+
+Type* Typer::Visitor::TypeCheckedInt32Add(Node* node) {
+  return Type::Integral32();
+}
+
+Type* Typer::Visitor::TypeCheckedInt32Sub(Node* node) {
+  return Type::Integral32();
+}
+
+Type* Typer::Visitor::TypeCheckedUint32ToInt32(Node* node) {
+  return Type::Signed32();
+}
+
+Type* Typer::Visitor::TypeCheckedFloat64ToInt32(Node* node) {
+  return Type::Signed32();
+}
+
+Type* Typer::Visitor::TypeCheckedTaggedToInt32(Node* node) {
+  return Type::Signed32();
+}
+
+Type* Typer::Visitor::TypeCheckedTaggedToFloat64(Node* node) {
+  return Type::Number();
+}
+
+Type* Typer::Visitor::TypeCheckFloat64Hole(Node* node) {
+  Type* type = Operand(node, 0);
+  return type;
+}
+
+Type* Typer::Visitor::TypeCheckTaggedHole(Node* node) {
+  CheckTaggedHoleMode mode = CheckTaggedHoleModeOf(node->op());
+  Type* type = Operand(node, 0);
+  type = Type::Intersect(type, Type::NonInternal(), zone());
+  switch (mode) {
+    case CheckTaggedHoleMode::kConvertHoleToUndefined: {
+      // The hole is turned into undefined.
+      type = Type::Union(type, Type::Undefined(), zone());
+      break;
+    }
+    case CheckTaggedHoleMode::kNeverReturnHole: {
+      // We deoptimize in case of the hole.
+      break;
+    }
+  }
+  return type;
+}
+
 Type* Typer::Visitor::TypeTruncateTaggedToWord32(Node* node) {
   Type* arg = Operand(node, 0);
-  // TODO(neis): DCHECK(arg->Is(Type::Number()));
+  // TODO(jarin): DCHECK(arg->Is(Type::NumberOrUndefined()));
+  // Several mjsunit and cctest tests fail this check. For instance,
+  // mjsunit/compiler/regress-607493 fails with Any/Any in simplified-lowering
+  // (after propagation).
   return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
 }
 
@@ -2008,6 +2017,10 @@
 
 // Machine operators.
 
+Type* Typer::Visitor::TypeDebugBreak(Node* node) { return Type::None(); }
+
+Type* Typer::Visitor::TypeComment(Node* node) { return Type::None(); }
+
 Type* Typer::Visitor::TypeLoad(Node* node) { return Type::Any(); }
 
 Type* Typer::Visitor::TypeStackSlot(Node* node) { return Type::Any(); }
@@ -2210,6 +2223,9 @@
   return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
 }
 
+Type* Typer::Visitor::TypeNumberSilenceNaN(Node* node) {
+  return Type::Number();
+}
 
 Type* Typer::Visitor::TypeChangeFloat64ToUint32(Node* node) {
   return Type::Intersect(Type::Unsigned32(), Type::UntaggedIntegral32(),
@@ -2256,6 +2272,9 @@
   return Type::Intersect(Type::Signed32(), Type::UntaggedFloat64(), zone());
 }
 
+Type* Typer::Visitor::TypeFloat64SilenceNaN(Node* node) {
+  return Type::UntaggedFloat64();
+}
 
 Type* Typer::Visitor::TypeChangeInt32ToInt64(Node* node) {
   return Type::Internal();
@@ -2349,6 +2368,8 @@
   return Type::Number();
 }
 
+Type* Typer::Visitor::TypeFloat32Neg(Node* node) { return Type::Number(); }
+
 Type* Typer::Visitor::TypeFloat32Mul(Node* node) { return Type::Number(); }
 
 
@@ -2392,6 +2413,8 @@
   return Type::Number();
 }
 
+Type* Typer::Visitor::TypeFloat64Neg(Node* node) { return Type::Number(); }
+
 Type* Typer::Visitor::TypeFloat64Mul(Node* node) { return Type::Number(); }
 
 
@@ -2412,9 +2435,33 @@
   return Type::Number();
 }
 
+Type* Typer::Visitor::TypeFloat64Atan(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Atan2(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Atanh(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Cos(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Exp(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Expm1(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Log(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Log1p(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Log2(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Log10(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Cbrt(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Sin(Node* node) { return Type::Number(); }
 
 Type* Typer::Visitor::TypeFloat64Sqrt(Node* node) { return Type::Number(); }
 
+Type* Typer::Visitor::TypeFloat64Tan(Node* node) { return Type::Number(); }
 
 Type* Typer::Visitor::TypeFloat64Equal(Node* node) { return Type::Boolean(); }
 
@@ -2547,6 +2594,7 @@
 #define SIMD_RETURN_SIMD(Name) \
   Type* Typer::Visitor::Type##Name(Node* node) { return Type::Simd(); }
 MACHINE_SIMD_RETURN_SIMD_OP_LIST(SIMD_RETURN_SIMD)
+MACHINE_SIMD_GENERIC_OP_LIST(SIMD_RETURN_SIMD)
 #undef SIMD_RETURN_SIMD
 
 #define SIMD_RETURN_NUM(Name) \
diff --git a/src/compiler/typer.h b/src/compiler/typer.h
index 0982b28..b6c5cb3 100644
--- a/src/compiler/typer.h
+++ b/src/compiler/typer.h
@@ -7,6 +7,7 @@
 
 #include "src/base/flags.h"
 #include "src/compiler/graph.h"
+#include "src/compiler/operation-typer.h"
 #include "src/types.h"
 
 namespace v8 {
@@ -18,6 +19,7 @@
 
 namespace compiler {
 
+class OperationTyper;
 
 class Typer {
  public:
@@ -47,6 +49,7 @@
   Flags flags() const { return flags_; }
   CompilationDependencies* dependencies() const { return dependencies_; }
   FunctionType* function_type() const { return function_type_; }
+  OperationTyper* operation_typer() { return &operation_typer_; }
 
   Isolate* const isolate_;
   Graph* const graph_;
@@ -55,6 +58,7 @@
   FunctionType* function_type_;
   Decorator* decorator_;
   TypeCache const& cache_;
+  OperationTyper operation_typer_;
 
   Type* singleton_false_;
   Type* singleton_true_;
diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc
index 0e34285..365f075 100644
--- a/src/compiler/verifier.cc
+++ b/src/compiler/verifier.cc
@@ -420,7 +420,7 @@
     case IrOpcode::kTypeGuard:
       // TODO(bmeurer): what are the constraints on these?
       break;
-    case IrOpcode::kCheckPoint:
+    case IrOpcode::kCheckpoint:
       // Type is empty.
       CheckNotTyped(node);
       break;
@@ -636,11 +636,31 @@
     case IrOpcode::kJSStoreMessage:
       break;
 
+    case IrOpcode::kJSGeneratorStore:
+      CheckNotTyped(node);
+      break;
+
+    case IrOpcode::kJSGeneratorRestoreContinuation:
+      CheckUpperIs(node, Type::SignedSmall());
+      break;
+
+    case IrOpcode::kJSGeneratorRestoreRegister:
+      CheckUpperIs(node, Type::Any());
+      break;
+
     case IrOpcode::kJSStackCheck:
       // Type is empty.
       CheckNotTyped(node);
       break;
 
+    case IrOpcode::kDebugBreak:
+      CheckNotTyped(node);
+      break;
+
+    case IrOpcode::kComment:
+      CheckNotTyped(node);
+      break;
+
     // Simplified operators
     // -------------------------------
     case IrOpcode::kBooleanNot:
@@ -654,16 +674,28 @@
       CheckUpperIs(node, Type::Number());
       break;
     case IrOpcode::kNumberEqual:
-      // (NumberOrUndefined, NumberOrUndefined) -> Boolean
-      CheckValueInputIs(node, 0, Type::NumberOrUndefined());
-      CheckValueInputIs(node, 1, Type::NumberOrUndefined());
+      // (Number, Number) -> Boolean
+      CheckValueInputIs(node, 0, Type::Number());
+      CheckValueInputIs(node, 1, Type::Number());
       CheckUpperIs(node, Type::Boolean());
       break;
     case IrOpcode::kNumberLessThan:
     case IrOpcode::kNumberLessThanOrEqual:
       // (Number, Number) -> Boolean
-      CheckValueInputIs(node, 0, Type::NumberOrUndefined());
-      CheckValueInputIs(node, 1, Type::NumberOrUndefined());
+      CheckValueInputIs(node, 0, Type::Number());
+      CheckValueInputIs(node, 1, Type::Number());
+      CheckUpperIs(node, Type::Boolean());
+      break;
+    case IrOpcode::kSpeculativeNumberAdd:
+    case IrOpcode::kSpeculativeNumberSubtract:
+    case IrOpcode::kSpeculativeNumberMultiply:
+    case IrOpcode::kSpeculativeNumberDivide:
+    case IrOpcode::kSpeculativeNumberModulus:
+      CheckUpperIs(node, Type::Number());
+      break;
+    case IrOpcode::kSpeculativeNumberEqual:
+    case IrOpcode::kSpeculativeNumberLessThan:
+    case IrOpcode::kSpeculativeNumberLessThanOrEqual:
       CheckUpperIs(node, Type::Boolean());
       break;
     case IrOpcode::kNumberAdd:
@@ -671,16 +703,15 @@
     case IrOpcode::kNumberMultiply:
     case IrOpcode::kNumberDivide:
       // (Number, Number) -> Number
-      CheckValueInputIs(node, 0, Type::NumberOrUndefined());
-      CheckValueInputIs(node, 1, Type::NumberOrUndefined());
-      // CheckUpperIs(node, Type::Number());
+      CheckValueInputIs(node, 0, Type::Number());
+      CheckValueInputIs(node, 1, Type::Number());
+      CheckUpperIs(node, Type::Number());
       break;
     case IrOpcode::kNumberModulus:
       // (Number, Number) -> Number
       CheckValueInputIs(node, 0, Type::Number());
       CheckValueInputIs(node, 1, Type::Number());
-      // TODO(rossberg): activate once we retype after opcode changes.
-      // CheckUpperIs(node, Type::Number());
+      CheckUpperIs(node, Type::Number());
       break;
     case IrOpcode::kNumberBitwiseOr:
     case IrOpcode::kNumberBitwiseXor:
@@ -714,9 +745,30 @@
       CheckValueInputIs(node, 0, Type::Unsigned32());
       CheckUpperIs(node, Type::Unsigned32());
       break;
+    case IrOpcode::kNumberAtan2:
+      // (Number, Number) -> Number
+      CheckValueInputIs(node, 0, Type::Number());
+      CheckValueInputIs(node, 1, Type::Number());
+      CheckUpperIs(node, Type::Number());
+      break;
+    case IrOpcode::kNumberAbs:
     case IrOpcode::kNumberCeil:
     case IrOpcode::kNumberFloor:
+    case IrOpcode::kNumberFround:
+    case IrOpcode::kNumberAtan:
+    case IrOpcode::kNumberAtanh:
+    case IrOpcode::kNumberCos:
+    case IrOpcode::kNumberExp:
+    case IrOpcode::kNumberExpm1:
+    case IrOpcode::kNumberLog:
+    case IrOpcode::kNumberLog1p:
+    case IrOpcode::kNumberLog2:
+    case IrOpcode::kNumberLog10:
+    case IrOpcode::kNumberCbrt:
     case IrOpcode::kNumberRound:
+    case IrOpcode::kNumberSin:
+    case IrOpcode::kNumberSqrt:
+    case IrOpcode::kNumberTan:
     case IrOpcode::kNumberTrunc:
       // Number -> Number
       CheckValueInputIs(node, 0, Type::Number());
@@ -724,18 +776,23 @@
       break;
     case IrOpcode::kNumberToInt32:
       // Number -> Signed32
-      CheckValueInputIs(node, 0, Type::NumberOrUndefined());
+      CheckValueInputIs(node, 0, Type::Number());
       CheckUpperIs(node, Type::Signed32());
       break;
     case IrOpcode::kNumberToUint32:
       // Number -> Unsigned32
-      CheckValueInputIs(node, 0, Type::NumberOrUndefined());
+      CheckValueInputIs(node, 0, Type::Number());
       CheckUpperIs(node, Type::Unsigned32());
       break;
-    case IrOpcode::kNumberIsHoleNaN:
-      // Number -> Boolean
-      CheckValueInputIs(node, 0, Type::Number());
-      CheckUpperIs(node, Type::Boolean());
+    case IrOpcode::kPlainPrimitiveToNumber:
+      // Type is Number.
+      CheckUpperIs(node, Type::Number());
+      break;
+    case IrOpcode::kPlainPrimitiveToWord32:
+      CheckUpperIs(node, Type::Number());
+      break;
+    case IrOpcode::kPlainPrimitiveToFloat64:
+      CheckUpperIs(node, Type::Number());
       break;
     case IrOpcode::kStringEqual:
     case IrOpcode::kStringLessThan:
@@ -745,6 +802,11 @@
       CheckValueInputIs(node, 1, Type::String());
       CheckUpperIs(node, Type::Boolean());
       break;
+    case IrOpcode::kStringFromCharCode:
+      // Number -> String
+      CheckValueInputIs(node, 0, Type::Number());
+      CheckUpperIs(node, Type::String());
+      break;
     case IrOpcode::kStringToNumber:
       // String -> Number
       CheckValueInputIs(node, 0, Type::String());
@@ -798,7 +860,7 @@
       break;
     }
     case IrOpcode::kChangeTaggedToFloat64: {
-      // Number /\ Tagged -> Number /\ UntaggedFloat64
+      // NumberOrUndefined /\ Tagged -> Number /\ UntaggedFloat64
       // TODO(neis): Activate once ChangeRepresentation works in typer.
       // Type* from = Type::Intersect(Type::Number(), Type::Tagged());
       // Type* to = Type::Intersect(Type::Number(), Type::UntaggedFloat64());
@@ -806,6 +868,16 @@
       // CheckUpperIs(node, to));
       break;
     }
+    case IrOpcode::kTruncateTaggedToFloat64: {
+      // NumberOrUndefined /\ Tagged -> Number /\ UntaggedFloat64
+      // TODO(neis): Activate once ChangeRepresentation works in typer.
+      // Type* from = Type::Intersect(Type::NumberOrUndefined(),
+      // Type::Tagged());
+      // Type* to = Type::Intersect(Type::Number(), Type::UntaggedFloat64());
+      // CheckValueInputIs(node, 0, from));
+      // CheckUpperIs(node, to));
+      break;
+    }
     case IrOpcode::kChangeInt31ToTaggedSigned: {
       // Signed31 /\ UntaggedInt32 -> Signed31 /\ Tagged
       // TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -870,6 +942,37 @@
       break;
     }
 
+    case IrOpcode::kCheckBounds:
+      CheckValueInputIs(node, 0, Type::Any());
+      CheckValueInputIs(node, 1, Type::Unsigned31());
+      CheckUpperIs(node, Type::Unsigned31());
+      break;
+    case IrOpcode::kCheckTaggedSigned:
+      CheckValueInputIs(node, 0, Type::Any());
+      CheckUpperIs(node, Type::TaggedSigned());
+      break;
+    case IrOpcode::kCheckTaggedPointer:
+      CheckValueInputIs(node, 0, Type::Any());
+      CheckUpperIs(node, Type::TaggedPointer());
+      break;
+
+    case IrOpcode::kCheckedInt32Add:
+    case IrOpcode::kCheckedInt32Sub:
+    case IrOpcode::kCheckedUint32ToInt32:
+    case IrOpcode::kCheckedFloat64ToInt32:
+    case IrOpcode::kCheckedTaggedToInt32:
+    case IrOpcode::kCheckedTaggedToFloat64:
+      break;
+
+    case IrOpcode::kCheckFloat64Hole:
+      CheckValueInputIs(node, 0, Type::Number());
+      CheckUpperIs(node, Type::Number());
+      break;
+    case IrOpcode::kCheckTaggedHole:
+      CheckValueInputIs(node, 0, Type::Any());
+      CheckUpperIs(node, Type::Any());
+      break;
+
     case IrOpcode::kLoadField:
       // Object -> fieldtype
       // TODO(rossberg): activate once machine ops are typed.
@@ -900,6 +1003,10 @@
       // CheckValueInputIs(node, 1, ElementAccessOf(node->op()).type));
       CheckNotTyped(node);
       break;
+    case IrOpcode::kNumberSilenceNaN:
+      CheckValueInputIs(node, 0, Type::Number());
+      CheckUpperIs(node, Type::Number());
+      break;
 
     // Machine operators
     // -----------------------
@@ -961,6 +1068,7 @@
     case IrOpcode::kFloat32Add:
     case IrOpcode::kFloat32Sub:
     case IrOpcode::kFloat32SubPreserveNan:
+    case IrOpcode::kFloat32Neg:
     case IrOpcode::kFloat32Mul:
     case IrOpcode::kFloat32Div:
     case IrOpcode::kFloat32Max:
@@ -973,13 +1081,27 @@
     case IrOpcode::kFloat64Add:
     case IrOpcode::kFloat64Sub:
     case IrOpcode::kFloat64SubPreserveNan:
+    case IrOpcode::kFloat64Neg:
     case IrOpcode::kFloat64Mul:
     case IrOpcode::kFloat64Div:
     case IrOpcode::kFloat64Mod:
     case IrOpcode::kFloat64Max:
     case IrOpcode::kFloat64Min:
     case IrOpcode::kFloat64Abs:
+    case IrOpcode::kFloat64Atan:
+    case IrOpcode::kFloat64Atan2:
+    case IrOpcode::kFloat64Atanh:
+    case IrOpcode::kFloat64Cos:
+    case IrOpcode::kFloat64Exp:
+    case IrOpcode::kFloat64Expm1:
+    case IrOpcode::kFloat64Log:
+    case IrOpcode::kFloat64Log1p:
+    case IrOpcode::kFloat64Log2:
+    case IrOpcode::kFloat64Log10:
+    case IrOpcode::kFloat64Cbrt:
+    case IrOpcode::kFloat64Sin:
     case IrOpcode::kFloat64Sqrt:
+    case IrOpcode::kFloat64Tan:
     case IrOpcode::kFloat32RoundDown:
     case IrOpcode::kFloat64RoundDown:
     case IrOpcode::kFloat32RoundUp:
@@ -1014,6 +1136,7 @@
     case IrOpcode::kChangeFloat32ToFloat64:
     case IrOpcode::kChangeFloat64ToInt32:
     case IrOpcode::kChangeFloat64ToUint32:
+    case IrOpcode::kFloat64SilenceNaN:
     case IrOpcode::kTruncateFloat64ToUint32:
     case IrOpcode::kTruncateFloat32ToInt32:
     case IrOpcode::kTruncateFloat32ToUint32:
diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc
index 619e639..0a13f98 100644
--- a/src/compiler/wasm-compiler.cc
+++ b/src/compiler/wasm-compiler.cc
@@ -30,7 +30,6 @@
 #include "src/code-stubs.h"
 #include "src/factory.h"
 #include "src/log-inl.h"
-#include "src/profiler/cpu-profiler.h"
 
 #include "src/wasm/ast-decoder.h"
 #include "src/wasm/wasm-module.h"
@@ -607,7 +606,8 @@
     case wasm::kExprF64Pow:
       return BuildF64Pow(left, right);
     case wasm::kExprF64Atan2:
-      return BuildF64Atan2(left, right);
+      op = m->Float64Atan2();
+      break;
     case wasm::kExprF64Mod:
       return BuildF64Mod(left, right);
     case wasm::kExprI32AsmjsDivS:
@@ -645,16 +645,28 @@
     case wasm::kExprF32Abs:
       op = m->Float32Abs();
       break;
-    case wasm::kExprF32Neg:
-      return BuildF32Neg(input);
+    case wasm::kExprF32Neg: {
+      if (m->Float32Neg().IsSupported()) {
+        op = m->Float32Neg().op();
+        break;
+      } else {
+        return BuildF32Neg(input);
+      }
+    }
     case wasm::kExprF32Sqrt:
       op = m->Float32Sqrt();
       break;
     case wasm::kExprF64Abs:
       op = m->Float64Abs();
       break;
-    case wasm::kExprF64Neg:
-      return BuildF64Neg(input);
+    case wasm::kExprF64Neg: {
+      if (m->Float64Neg().IsSupported()) {
+        op = m->Float64Neg().op();
+        break;
+      } else {
+        return BuildF64Neg(input);
+      }
+    }
     case wasm::kExprF64Sqrt:
       op = m->Float64Sqrt();
       break;
@@ -769,24 +781,28 @@
     case wasm::kExprF64Asin: {
       return BuildF64Asin(input);
     }
-    case wasm::kExprF64Atan: {
-      return BuildF64Atan(input);
-    }
+    case wasm::kExprF64Atan:
+      op = m->Float64Atan();
+      break;
     case wasm::kExprF64Cos: {
-      return BuildF64Cos(input);
+      op = m->Float64Cos();
+      break;
     }
     case wasm::kExprF64Sin: {
-      return BuildF64Sin(input);
+      op = m->Float64Sin();
+      break;
     }
     case wasm::kExprF64Tan: {
-      return BuildF64Tan(input);
+      op = m->Float64Tan();
+      break;
     }
     case wasm::kExprF64Exp: {
-      return BuildF64Exp(input);
+      op = m->Float64Exp();
+      break;
     }
-    case wasm::kExprF64Log: {
-      return BuildF64Log(input);
-    }
+    case wasm::kExprF64Log:
+      op = m->Float64Log();
+      break;
     case wasm::kExprI32ConvertI64:
       op = m->TruncateInt64ToInt32();
       break;
@@ -1336,55 +1352,6 @@
   return BuildCFuncInstruction(ref, type, input);
 }
 
-Node* WasmGraphBuilder::BuildF64Atan(Node* input) {
-  MachineType type = MachineType::Float64();
-  ExternalReference ref =
-      ExternalReference::f64_atan_wrapper_function(jsgraph()->isolate());
-  return BuildCFuncInstruction(ref, type, input);
-}
-
-Node* WasmGraphBuilder::BuildF64Cos(Node* input) {
-  MachineType type = MachineType::Float64();
-  ExternalReference ref =
-      ExternalReference::f64_cos_wrapper_function(jsgraph()->isolate());
-  return BuildCFuncInstruction(ref, type, input);
-}
-
-Node* WasmGraphBuilder::BuildF64Sin(Node* input) {
-  MachineType type = MachineType::Float64();
-  ExternalReference ref =
-      ExternalReference::f64_sin_wrapper_function(jsgraph()->isolate());
-  return BuildCFuncInstruction(ref, type, input);
-}
-
-Node* WasmGraphBuilder::BuildF64Tan(Node* input) {
-  MachineType type = MachineType::Float64();
-  ExternalReference ref =
-      ExternalReference::f64_tan_wrapper_function(jsgraph()->isolate());
-  return BuildCFuncInstruction(ref, type, input);
-}
-
-Node* WasmGraphBuilder::BuildF64Exp(Node* input) {
-  MachineType type = MachineType::Float64();
-  ExternalReference ref =
-      ExternalReference::f64_exp_wrapper_function(jsgraph()->isolate());
-  return BuildCFuncInstruction(ref, type, input);
-}
-
-Node* WasmGraphBuilder::BuildF64Log(Node* input) {
-  MachineType type = MachineType::Float64();
-  ExternalReference ref =
-      ExternalReference::f64_log_wrapper_function(jsgraph()->isolate());
-  return BuildCFuncInstruction(ref, type, input);
-}
-
-Node* WasmGraphBuilder::BuildF64Atan2(Node* left, Node* right) {
-  MachineType type = MachineType::Float64();
-  ExternalReference ref =
-      ExternalReference::f64_atan2_wrapper_function(jsgraph()->isolate());
-  return BuildCFuncInstruction(ref, type, left, right);
-}
-
 Node* WasmGraphBuilder::BuildF64Pow(Node* left, Node* right) {
   MachineType type = MachineType::Float64();
   ExternalReference ref =
@@ -1512,9 +1479,10 @@
   } else {
     Node* trunc = graph()->NewNode(
         jsgraph()->machine()->TryTruncateFloat32ToInt64(), input);
-    Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
-    Node* overflow =
-        graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+    Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc,
+                                    graph()->start());
+    Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
+                                      graph()->start());
     trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
     return result;
   }
@@ -1529,9 +1497,10 @@
   } else {
     Node* trunc = graph()->NewNode(
         jsgraph()->machine()->TryTruncateFloat32ToUint64(), input);
-    Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
-    Node* overflow =
-        graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+    Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc,
+                                    graph()->start());
+    Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
+                                      graph()->start());
     trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
     return result;
   }
@@ -1546,9 +1515,10 @@
   } else {
     Node* trunc = graph()->NewNode(
         jsgraph()->machine()->TryTruncateFloat64ToInt64(), input);
-    Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
-    Node* overflow =
-        graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+    Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc,
+                                    graph()->start());
+    Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
+                                      graph()->start());
     trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
     return result;
   }
@@ -1563,9 +1533,10 @@
   } else {
     Node* trunc = graph()->NewNode(
         jsgraph()->machine()->TryTruncateFloat64ToUint64(), input);
-    Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
-    Node* overflow =
-        graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+    Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc,
+                                    graph()->start());
+    Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
+                                      graph()->start());
     trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
     return result;
   }
@@ -1896,7 +1867,7 @@
   DCHECK_NULL(args[0]);
 
   // Add code object as constant.
-  args[0] = HeapConstant(module_->GetFunctionCode(index));
+  args[0] = HeapConstant(module_->GetCodeOrPlaceholder(index));
   wasm::FunctionSig* sig = module_->GetFunctionSignature(index);
 
   return BuildWasmCall(sig, args, position);
@@ -1950,8 +1921,9 @@
                                           Int32Constant(kPointerSizeLog2)),
                          Int32Constant(fixed_offset)),
         *effect_, *control_);
-    Node* sig_match = graph()->NewNode(machine->WordEqual(), load_sig,
-                                       jsgraph()->SmiConstant(index));
+    Node* sig_match =
+        graph()->NewNode(machine->Word32Equal(),
+                         BuildChangeSmiToInt32(load_sig), Int32Constant(index));
     trap_->AddTrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
   }
 
@@ -2008,9 +1980,10 @@
     return BuildChangeInt32ToSmi(value);
   }
 
-  Node* add = graph()->NewNode(machine->Int32AddWithOverflow(), value, value);
+  Node* add = graph()->NewNode(machine->Int32AddWithOverflow(), value, value,
+                               graph()->start());
 
-  Node* ovf = graph()->NewNode(common->Projection(1), add);
+  Node* ovf = graph()->NewNode(common->Projection(1), add, graph()->start());
   Node* branch = graph()->NewNode(common->Branch(BranchHint::kFalse), ovf,
                                   graph()->start());
 
@@ -2019,7 +1992,7 @@
       graph()->NewNode(machine->ChangeInt32ToFloat64(), value), if_true);
 
   Node* if_false = graph()->NewNode(common->IfFalse(), branch);
-  Node* vfalse = graph()->NewNode(common->Projection(0), add);
+  Node* vfalse = graph()->NewNode(common->Projection(0), add, if_false);
 
   Node* merge = graph()->NewNode(common->Merge(2), if_true, if_false);
   Node* phi = graph()->NewNode(common->Phi(MachineRepresentation::kTagged, 2),
@@ -2072,10 +2045,10 @@
   if (machine->Is64()) {
     vsmi = BuildChangeInt32ToSmi(value32);
   } else {
-    Node* smi_tag =
-        graph()->NewNode(machine->Int32AddWithOverflow(), value32, value32);
+    Node* smi_tag = graph()->NewNode(machine->Int32AddWithOverflow(), value32,
+                                     value32, if_smi);
 
-    Node* check_ovf = graph()->NewNode(common->Projection(1), smi_tag);
+    Node* check_ovf = graph()->NewNode(common->Projection(1), smi_tag, if_smi);
     Node* branch_ovf =
         graph()->NewNode(common->Branch(BranchHint::kFalse), check_ovf, if_smi);
 
@@ -2083,7 +2056,7 @@
     if_box = graph()->NewNode(common->Merge(2), if_ovf, if_box);
 
     if_smi = graph()->NewNode(common->IfFalse(), branch_ovf);
-    vsmi = graph()->NewNode(common->Projection(0), smi_tag);
+    vsmi = graph()->NewNode(common->Projection(0), smi_tag, if_smi);
   }
 
   // Allocate the box for the {value}.
@@ -2335,7 +2308,9 @@
   Callable callable = CodeFactory::AllocateHeapNumber(jsgraph()->isolate());
   Node* target = jsgraph()->HeapConstant(callable.code());
   Node* context = jsgraph()->NoContextConstant();
-  Node* effect = graph()->NewNode(common->BeginRegion(), graph()->start());
+  Node* effect =
+      graph()->NewNode(common->BeginRegion(RegionObservability::kNotObservable),
+                       graph()->start());
   if (!allocate_heap_number_operator_.is_set()) {
     CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
         jsgraph()->isolate(), jsgraph()->zone(), callable.descriptor(), 0,
@@ -2388,7 +2363,7 @@
   args[pos++] = HeapConstant(wasm_code);
 
   // Convert JS parameters to WASM numbers.
-  for (int i = 0; i < wasm_count; i++) {
+  for (int i = 0; i < wasm_count; ++i) {
     Node* param =
         graph()->NewNode(jsgraph()->common()->Parameter(i + 1), start);
     Node* wasm_param = FromJS(param, context, sig->GetParam(i));
@@ -2414,7 +2389,8 @@
   if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
       sig->GetReturn(0) == wasm::kAstI64) {
     // The return values comes as two values, we pick the low word.
-    retval = graph()->NewNode(jsgraph()->common()->Projection(0), retval);
+    retval = graph()->NewNode(jsgraph()->common()->Projection(0), retval,
+                              graph()->start());
   }
   Node* jsval =
       ToJS(retval, context,
@@ -2476,7 +2452,7 @@
 
   // Convert WASM numbers to JS values.
   int param_index = 0;
-  for (int i = 0; i < wasm_count; i++) {
+  for (int i = 0; i < wasm_count; ++i) {
     Node* param =
         graph()->NewNode(jsgraph()->common()->Parameter(param_index++), start);
     args[pos++] = ToJS(param, context, sig->GetParam(i));
@@ -2537,10 +2513,13 @@
   DCHECK(module_ && module_->instance);
   uint32_t size = static_cast<uint32_t>(module_->instance->mem_size);
   if (offset == 0) {
-    if (!mem_size_) mem_size_ = jsgraph()->Int32Constant(size);
+    if (!mem_size_)
+      mem_size_ = jsgraph()->RelocatableInt32Constant(
+          size, RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
     return mem_size_;
   } else {
-    return jsgraph()->Int32Constant(size + offset);
+    return jsgraph()->RelocatableInt32Constant(
+        size + offset, RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
   }
 }
 
@@ -2554,11 +2533,11 @@
 }
 
 Node* WasmGraphBuilder::LoadGlobal(uint32_t index) {
-  DCHECK(module_ && module_->instance && module_->instance->globals_start);
   MachineType mem_type = module_->GetGlobalType(index);
-  Node* addr = jsgraph()->IntPtrConstant(
+  Node* addr = jsgraph()->RelocatableIntPtrConstant(
       reinterpret_cast<uintptr_t>(module_->instance->globals_start +
-                                  module_->module->globals[index].offset));
+                                  module_->module->globals[index].offset),
+      RelocInfo::WASM_GLOBAL_REFERENCE);
   const Operator* op = jsgraph()->machine()->Load(mem_type);
   Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), *effect_,
                                 *control_);
@@ -2567,11 +2546,11 @@
 }
 
 Node* WasmGraphBuilder::StoreGlobal(uint32_t index, Node* val) {
-  DCHECK(module_ && module_->instance && module_->instance->globals_start);
   MachineType mem_type = module_->GetGlobalType(index);
-  Node* addr = jsgraph()->IntPtrConstant(
+  Node* addr = jsgraph()->RelocatableIntPtrConstant(
       reinterpret_cast<uintptr_t>(module_->instance->globals_start +
-                                  module_->module->globals[index].offset));
+                                  module_->module->globals[index].offset),
+      RelocInfo::WASM_GLOBAL_REFERENCE);
   const Operator* op = jsgraph()->machine()->Store(
       StoreRepresentation(mem_type.representation(), kNoWriteBarrier));
   Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), val,
@@ -2584,46 +2563,177 @@
                                       uint32_t offset,
                                       wasm::WasmCodePosition position) {
   DCHECK(module_ && module_->instance);
-  size_t size = module_->instance->mem_size;
+  uint32_t size = module_->instance->mem_size;
   byte memsize = wasm::WasmOpcodes::MemSize(memtype);
 
-  if (offset >= size || (static_cast<uint64_t>(offset) + memsize) > size) {
-    // The access will always throw (unless memory is grown).
-    Node* cond = jsgraph()->Int32Constant(0);
-    trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
-    return;
-  }
-
   // Check against the effective size.
-  size_t effective_size = size - offset - memsize;
+  size_t effective_size;
+  if (offset >= size || (static_cast<uint64_t>(offset) + memsize) > size) {
+    effective_size = 0;
+  } else {
+    effective_size = size - offset - memsize + 1;
+  }
   CHECK(effective_size <= kMaxUInt32);
 
   Uint32Matcher m(index);
   if (m.HasValue()) {
     uint32_t value = m.Value();
-    if (value <= effective_size) {
+    if (value < effective_size) {
       // The bounds check will always succeed.
       return;
     }
   }
 
-  Node* cond = graph()->NewNode(
-      jsgraph()->machine()->Uint32LessThanOrEqual(), index,
-      jsgraph()->Int32Constant(static_cast<uint32_t>(effective_size)));
+  Node* cond = graph()->NewNode(jsgraph()->machine()->Uint32LessThan(), index,
+                                jsgraph()->RelocatableInt32Constant(
+                                    static_cast<uint32_t>(effective_size),
+                                    RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
 
   trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
 }
 
+MachineType WasmGraphBuilder::GetTypeForUnalignedAccess(uint32_t alignment,
+                                                        bool signExtend) {
+  switch (alignment) {
+    case 0:
+      return signExtend ? MachineType::Int8() : MachineType::Uint8();
+    case 1:
+      return signExtend ? MachineType::Int16() : MachineType::Uint16();
+    case 2:
+      return signExtend ? MachineType::Int32() : MachineType::Uint32();
+    default:
+      UNREACHABLE();
+      return MachineType::None();
+  }
+}
+
+Node* WasmGraphBuilder::GetUnalignedLoadOffsetNode(Node* baseOffset,
+                                                   int numberOfBytes,
+                                                   int stride, int current) {
+  int offset;
+  wasm::WasmOpcode addOpcode;
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+  offset = numberOfBytes - stride - current;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+  offset = current;
+#else
+#error Unsupported endianness
+#endif
+
+#if WASM_64
+  addOpcode = wasm::kExprI64Add;
+#else
+  addOpcode = wasm::kExprI32Add;
+#endif
+
+  if (offset == 0) {
+    return baseOffset;
+  } else {
+    return Binop(addOpcode, baseOffset, jsgraph()->Int32Constant(offset));
+  }
+}
+
+Node* WasmGraphBuilder::BuildUnalignedLoad(wasm::LocalType type,
+                                           MachineType memtype, Node* index,
+                                           uint32_t offset,
+                                           uint32_t alignment) {
+  Node* result;
+  Node* load;
+  bool extendTo64Bit = false;
+
+  wasm::WasmOpcode shiftOpcode;
+  wasm::WasmOpcode orOpcode;
+  Node* shiftConst;
+
+  bool signExtend = memtype.IsSigned();
+
+  bool isFloat = IsFloatingPoint(memtype.representation());
+  int stride =
+      1 << ElementSizeLog2Of(
+          GetTypeForUnalignedAccess(alignment, false).representation());
+  int numberOfBytes = 1 << ElementSizeLog2Of(memtype.representation());
+  DCHECK(numberOfBytes % stride == 0);
+
+  switch (type) {
+    case wasm::kAstI64:
+    case wasm::kAstF64:
+      shiftOpcode = wasm::kExprI64Shl;
+      orOpcode = wasm::kExprI64Ior;
+      result = jsgraph()->Int64Constant(0);
+      shiftConst = jsgraph()->Int64Constant(8 * stride);
+      extendTo64Bit = true;
+      break;
+    case wasm::kAstI32:
+    case wasm::kAstF32:
+      shiftOpcode = wasm::kExprI32Shl;
+      orOpcode = wasm::kExprI32Ior;
+      result = jsgraph()->Int32Constant(0);
+      shiftConst = jsgraph()->Int32Constant(8 * stride);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  Node* baseOffset = MemBuffer(offset);
+
+  for (int i = 0; i < numberOfBytes; i += stride) {
+    result = Binop(shiftOpcode, result, shiftConst);
+    load = graph()->NewNode(
+        jsgraph()->machine()->Load(
+            GetTypeForUnalignedAccess(alignment, signExtend)),
+        GetUnalignedLoadOffsetNode(baseOffset, numberOfBytes, stride, i), index,
+        *effect_, *control_);
+    *effect_ = load;
+    if (extendTo64Bit) {
+      if (signExtend) {
+        load =
+            graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), load);
+      } else {
+        load = graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(),
+                                load);
+      }
+    }
+    signExtend = false;
+    result = Binop(orOpcode, result, load);
+  }
+
+  // Convert to float
+  if (isFloat) {
+    switch (type) {
+      case wasm::kAstF32:
+        result = Unop(wasm::kExprF32ReinterpretI32, result);
+        break;
+      case wasm::kAstF64:
+        result = Unop(wasm::kExprF64ReinterpretI64, result);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+
+  return result;
+}
+
 Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
                                 Node* index, uint32_t offset,
+                                uint32_t alignment,
                                 wasm::WasmCodePosition position) {
   Node* load;
+
   // WASM semantics throw on OOB. Introduce explicit bounds check.
   BoundsCheckMem(memtype, index, offset, position);
-  load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
-                          MemBuffer(offset), index, *effect_, *control_);
+  bool aligned = static_cast<int>(alignment) >=
+                 ElementSizeLog2Of(memtype.representation());
 
-  *effect_ = load;
+  if (aligned ||
+      jsgraph()->machine()->UnalignedLoadSupported(memtype, alignment)) {
+    load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
+                            MemBuffer(offset), index, *effect_, *control_);
+    *effect_ = load;
+  } else {
+    load = BuildUnalignedLoad(type, memtype, index, offset, alignment);
+  }
 
   if (type == wasm::kAstI64 &&
       ElementSizeLog2Of(memtype.representation()) < 3) {
@@ -2641,16 +2751,120 @@
   return load;
 }
 
+Node* WasmGraphBuilder::GetUnalignedStoreOffsetNode(Node* baseOffset,
+                                                    int numberOfBytes,
+                                                    int stride, int current) {
+  int offset;
+  wasm::WasmOpcode addOpcode;
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+  offset = current;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+  offset = numberOfBytes - stride - current;
+#else
+#error Unsupported endianness
+#endif
+
+#if WASM_64
+  addOpcode = wasm::kExprI64Add;
+#else
+  addOpcode = wasm::kExprI32Add;
+#endif
+
+  if (offset == 0) {
+    return baseOffset;
+  } else {
+    return Binop(addOpcode, baseOffset, jsgraph()->Int32Constant(offset));
+  }
+}
+
+Node* WasmGraphBuilder::BuildUnalignedStore(MachineType memtype, Node* index,
+                                            uint32_t offset, uint32_t alignment,
+                                            Node* val) {
+  Node* store;
+  Node* newValue;
+
+  wasm::WasmOpcode shiftOpcode;
+
+  Node* shiftConst;
+  bool extendTo64Bit = false;
+  bool isFloat = IsFloatingPoint(memtype.representation());
+  int stride = 1 << ElementSizeLog2Of(
+                   GetTypeForUnalignedAccess(alignment).representation());
+  int numberOfBytes = 1 << ElementSizeLog2Of(memtype.representation());
+  DCHECK(numberOfBytes % stride == 0);
+
+  StoreRepresentation rep(GetTypeForUnalignedAccess(alignment).representation(),
+                          kNoWriteBarrier);
+
+  if (ElementSizeLog2Of(memtype.representation()) <= 2) {
+    shiftOpcode = wasm::kExprI32ShrU;
+    shiftConst = jsgraph()->Int32Constant(8 * stride);
+  } else {
+    shiftOpcode = wasm::kExprI64ShrU;
+    shiftConst = jsgraph()->Int64Constant(8 * stride);
+    extendTo64Bit = true;
+  }
+
+  newValue = val;
+  if (isFloat) {
+    switch (memtype.representation()) {
+      case MachineRepresentation::kFloat64:
+        newValue = Unop(wasm::kExprI64ReinterpretF64, val);
+        break;
+      case MachineRepresentation::kFloat32:
+        newValue = Unop(wasm::kExprI32ReinterpretF32, val);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+
+  Node* baseOffset = MemBuffer(offset);
+
+  for (int i = 0; i < numberOfBytes - stride; i += stride) {
+    store = graph()->NewNode(
+        jsgraph()->machine()->Store(rep),
+        GetUnalignedStoreOffsetNode(baseOffset, numberOfBytes, stride, i),
+        index,
+        extendTo64Bit ? Unop(wasm::kExprI32ConvertI64, newValue) : newValue,
+        *effect_, *control_);
+    newValue = Binop(shiftOpcode, newValue, shiftConst);
+    *effect_ = store;
+  }
+  store = graph()->NewNode(
+      jsgraph()->machine()->Store(rep),
+      GetUnalignedStoreOffsetNode(baseOffset, numberOfBytes, stride,
+                                  numberOfBytes - stride),
+      index,
+      extendTo64Bit ? Unop(wasm::kExprI32ConvertI64, newValue) : newValue,
+      *effect_, *control_);
+  *effect_ = store;
+  return val;
+}
+
 Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
-                                 uint32_t offset, Node* val,
+                                 uint32_t offset, uint32_t alignment, Node* val,
                                  wasm::WasmCodePosition position) {
   Node* store;
+
   // WASM semantics throw on OOB. Introduce explicit bounds check.
   BoundsCheckMem(memtype, index, offset, position);
   StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
-  store = graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
-                           index, val, *effect_, *control_);
-  *effect_ = store;
+  bool aligned = static_cast<int>(alignment) >=
+                 ElementSizeLog2Of(memtype.representation());
+
+  if (aligned ||
+      jsgraph()->machine()->UnalignedStoreSupported(memtype, alignment)) {
+    StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
+    store =
+        graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
+                         index, val, *effect_, *control_);
+    *effect_ = store;
+  } else {
+    store = BuildUnalignedStore(memtype, index, offset, alignment, val);
+  }
+
   return store;
 }
 
@@ -2704,13 +2918,12 @@
     source_position_table_->SetSourcePosition(node, pos);
 }
 
-static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
+static void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
                                       CompilationInfo* info,
                                       const char* message, uint32_t index,
                                       wasm::WasmName func_name) {
   Isolate* isolate = info->isolate();
-  if (isolate->logger()->is_logging_code_events() ||
-      isolate->cpu_profiler()->is_profiling()) {
+  if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
     ScopedVector<char> buffer(128);
     SNPrintF(buffer, "%s#%d:%.*s", message, index, func_name.length(),
              func_name.start());
@@ -2729,7 +2942,7 @@
 Handle<JSFunction> CompileJSToWasmWrapper(
     Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
     Handle<Code> wasm_code, Handle<JSObject> module_object, uint32_t index) {
-  wasm::WasmFunction* func = &module->module->functions[index];
+  const wasm::WasmFunction* func = &module->module->functions[index];
 
   //----------------------------------------------------------------------------
   // Create the JSFunction object.
@@ -2808,7 +3021,7 @@
     }
 
     RecordFunctionCompilation(
-        Logger::FUNCTION_TAG, &info, "js-to-wasm", index,
+        CodeEventListener::FUNCTION_TAG, &info, "js-to-wasm", index,
         module->module->GetName(func->name_offset, func->name_length));
     // Set the JSFunction's machine code.
     function->set_code(*code);
@@ -2816,7 +3029,7 @@
   return function;
 }
 
-Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+Handle<Code> CompileWasmToJSWrapper(Isolate* isolate,
                                     Handle<JSFunction> function,
                                     wasm::FunctionSig* sig,
                                     wasm::WasmName module_name,
@@ -2836,7 +3049,6 @@
   WasmGraphBuilder builder(&zone, &jsgraph, sig);
   builder.set_control_ptr(&control);
   builder.set_effect_ptr(&effect);
-  builder.set_module(module);
   builder.BuildWasmToJSWrapper(function, sig);
 
   Handle<Code> code = Handle<Code>::null();
@@ -2881,238 +3093,186 @@
       buffer.Dispose();
     }
 
-    RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "wasm-to-js", 0,
-                              module_name);
+    RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, &info,
+                              "wasm-to-js", 0, module_name);
   }
   return code;
 }
 
-std::pair<JSGraph*, SourcePositionTable*> BuildGraphForWasmFunction(
-    JSGraph* jsgraph, wasm::ErrorThrower* thrower, Isolate* isolate,
-    wasm::ModuleEnv*& module_env, const wasm::WasmFunction* function,
+SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
     double* decode_ms) {
   base::ElapsedTimer decode_timer;
   if (FLAG_trace_wasm_decode_time) {
     decode_timer.Start();
   }
   // Create a TF graph during decoding.
-  Graph* graph = jsgraph->graph();
-  CommonOperatorBuilder* common = jsgraph->common();
-  MachineOperatorBuilder* machine = jsgraph->machine();
+
+  Graph* graph = jsgraph_->graph();
+  CommonOperatorBuilder* common = jsgraph_->common();
+  MachineOperatorBuilder* machine = jsgraph_->machine();
   SourcePositionTable* source_position_table =
-      new (jsgraph->zone()) SourcePositionTable(graph);
-  WasmGraphBuilder builder(jsgraph->zone(), jsgraph, function->sig,
+      new (jsgraph_->zone()) SourcePositionTable(graph);
+  WasmGraphBuilder builder(jsgraph_->zone(), jsgraph_, function_->sig,
                            source_position_table);
   wasm::FunctionBody body = {
-      module_env, function->sig, module_env->module->module_start,
-      module_env->module->module_start + function->code_start_offset,
-      module_env->module->module_start + function->code_end_offset};
-  wasm::TreeResult result =
-      wasm::BuildTFGraph(isolate->allocator(), &builder, body);
+      module_env_, function_->sig, module_env_->module->module_start,
+      module_env_->module->module_start + function_->code_start_offset,
+      module_env_->module->module_start + function_->code_end_offset};
+  graph_construction_result_ =
+      wasm::BuildTFGraph(isolate_->allocator(), &builder, body);
+
+  if (graph_construction_result_.failed()) {
+    if (FLAG_trace_wasm_compiler) {
+      OFStream os(stdout);
+      os << "Compilation failed: " << graph_construction_result_ << std::endl;
+    }
+    return nullptr;
+  }
 
   if (machine->Is32()) {
-    Int64Lowering r(graph, machine, common, jsgraph->zone(), function->sig);
+    Int64Lowering r(graph, machine, common, jsgraph_->zone(), function_->sig);
     r.LowerGraph();
   }
 
-  if (result.failed()) {
-    if (FLAG_trace_wasm_compiler) {
-      OFStream os(stdout);
-      os << "Compilation failed: " << result << std::endl;
-    }
-    // Add the function as another context for the exception
-    ScopedVector<char> buffer(128);
-    wasm::WasmName name = module_env->module->GetName(function->name_offset,
-                                                      function->name_length);
-    SNPrintF(buffer, "Compiling WASM function #%d:%.*s failed:",
-             function->func_index, name.length(), name.start());
-    thrower->Failed(buffer.start(), result);
-    return std::make_pair(nullptr, nullptr);
-  }
-  int index = static_cast<int>(function->func_index);
+  int index = static_cast<int>(function_->func_index);
   if (index >= FLAG_trace_wasm_ast_start && index < FLAG_trace_wasm_ast_end) {
-    PrintAst(isolate->allocator(), body);
+    OFStream os(stdout);
+    PrintAst(isolate_->allocator(), body, os, nullptr);
   }
   if (FLAG_trace_wasm_decode_time) {
     *decode_ms = decode_timer.Elapsed().InMillisecondsF();
   }
-  return std::make_pair(jsgraph, source_position_table);
+  return source_position_table;
 }
 
-class WasmCompilationUnit {
- public:
-  WasmCompilationUnit(wasm::ErrorThrower* thrower, Isolate* isolate,
-                      wasm::ModuleEnv* module_env,
-                      const wasm::WasmFunction* function, uint32_t index)
-      : thrower_(thrower),
-        isolate_(isolate),
-        module_env_(module_env),
-        function_(function),
-        graph_zone_(new Zone(isolate->allocator())),
-        jsgraph_(new (graph_zone()) JSGraph(
-            isolate, new (graph_zone()) Graph(graph_zone()),
-            new (graph_zone()) CommonOperatorBuilder(graph_zone()), nullptr,
-            nullptr,
-            new (graph_zone()) MachineOperatorBuilder(
-                graph_zone(), MachineType::PointerRepresentation(),
-                InstructionSelector::SupportedMachineOperatorFlags()))),
-        compilation_zone_(isolate->allocator()),
-        info_(function->name_length != 0
-                  ? module_env->module->GetNameOrNull(function->name_offset,
-                                                      function->name_length)
-                  : ArrayVector("wasm"),
-              isolate, &compilation_zone_,
-              Code::ComputeFlags(Code::WASM_FUNCTION)),
-        job_(),
-        index_(index),
-        ok_(true) {
-    // Create and cache this node in the main thread.
-    jsgraph_->CEntryStubConstant(1);
+WasmCompilationUnit::WasmCompilationUnit(wasm::ErrorThrower* thrower,
+                                         Isolate* isolate,
+                                         wasm::ModuleEnv* module_env,
+                                         const wasm::WasmFunction* function,
+                                         uint32_t index)
+    : thrower_(thrower),
+      isolate_(isolate),
+      module_env_(module_env),
+      function_(function),
+      graph_zone_(new Zone(isolate->allocator())),
+      jsgraph_(new (graph_zone()) JSGraph(
+          isolate, new (graph_zone()) Graph(graph_zone()),
+          new (graph_zone()) CommonOperatorBuilder(graph_zone()), nullptr,
+          nullptr, new (graph_zone()) MachineOperatorBuilder(
+                       graph_zone(), MachineType::PointerRepresentation(),
+                       InstructionSelector::SupportedMachineOperatorFlags()))),
+      compilation_zone_(isolate->allocator()),
+      info_(function->name_length != 0
+                ? module_env->module->GetNameOrNull(function->name_offset,
+                                                    function->name_length)
+                : ArrayVector("wasm"),
+            isolate, &compilation_zone_,
+            Code::ComputeFlags(Code::WASM_FUNCTION)),
+      job_(),
+      index_(index),
+      ok_(true) {
+  // Create and cache this node in the main thread.
+  jsgraph_->CEntryStubConstant(1);
+}
+
+void WasmCompilationUnit::ExecuteCompilation() {
+  // TODO(ahaas): The counters are not thread-safe at the moment.
+  //    HistogramTimerScope wasm_compile_function_time_scope(
+  //        isolate_->counters()->wasm_compile_function_time());
+  if (FLAG_trace_wasm_compiler) {
+    OFStream os(stdout);
+    os << "Compiling WASM function "
+       << wasm::WasmFunctionName(function_, module_env_) << std::endl;
+    os << std::endl;
   }
 
-  Zone* graph_zone() { return graph_zone_.get(); }
+  double decode_ms = 0;
+  size_t node_count = 0;
 
-  void ExecuteCompilation() {
-    // TODO(ahaas): The counters are not thread-safe at the moment.
-    //    HistogramTimerScope wasm_compile_function_time_scope(
-    //        isolate_->counters()->wasm_compile_function_time());
-    if (FLAG_trace_wasm_compiler) {
-      OFStream os(stdout);
-      os << "Compiling WASM function "
-         << wasm::WasmFunctionName(function_, module_env_) << std::endl;
-      os << std::endl;
-    }
+  base::SmartPointer<Zone> graph_zone(graph_zone_.Detach());
+  SourcePositionTable* source_positions = BuildGraphForWasmFunction(&decode_ms);
 
-    double decode_ms = 0;
-    size_t node_count = 0;
-
-    base::SmartPointer<Zone> graph_zone(graph_zone_.Detach());
-    std::pair<JSGraph*, SourcePositionTable*> graph_result =
-        BuildGraphForWasmFunction(jsgraph_, thrower_, isolate_, module_env_,
-                                  function_, &decode_ms);
-    JSGraph* jsgraph = graph_result.first;
-    SourcePositionTable* source_positions = graph_result.second;
-
-    if (jsgraph == nullptr) {
-      ok_ = false;
-      return;
-    }
-
-    base::ElapsedTimer pipeline_timer;
-    if (FLAG_trace_wasm_decode_time) {
-      node_count = jsgraph->graph()->NodeCount();
-      pipeline_timer.Start();
-    }
-
-    // Run the compiler pipeline to generate machine code.
-    CallDescriptor* descriptor = wasm::ModuleEnv::GetWasmCallDescriptor(
-        &compilation_zone_, function_->sig);
-    if (jsgraph->machine()->Is32()) {
-      descriptor =
-          module_env_->GetI32WasmCallDescriptor(&compilation_zone_, descriptor);
-    }
-    job_.Reset(Pipeline::NewWasmCompilationJob(&info_, jsgraph->graph(),
-                                               descriptor, source_positions));
-    ok_ = job_->OptimizeGraph() == CompilationJob::SUCCEEDED;
-    // TODO(bradnelson): Improve histogram handling of size_t.
-    // TODO(ahaas): The counters are not thread-safe at the moment.
-    //    isolate_->counters()->wasm_compile_function_peak_memory_bytes()
-    // ->AddSample(
-    //        static_cast<int>(jsgraph->graph()->zone()->allocation_size()));
-
-    if (FLAG_trace_wasm_decode_time) {
-      double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
-      PrintF(
-          "wasm-compilation phase 1 ok: %d bytes, %0.3f ms decode, %zu nodes, "
-          "%0.3f ms pipeline\n",
-          static_cast<int>(function_->code_end_offset -
-                           function_->code_start_offset),
-          decode_ms, node_count, pipeline_ms);
-    }
+  if (graph_construction_result_.failed()) {
+    ok_ = false;
+    return;
   }
 
-  Handle<Code> FinishCompilation() {
-    if (!ok_) {
-      return Handle<Code>::null();
-    }
-    if (job_->GenerateCode() != CompilationJob::SUCCEEDED) {
-      return Handle<Code>::null();
-    }
-    base::ElapsedTimer compile_timer;
-    if (FLAG_trace_wasm_decode_time) {
-      compile_timer.Start();
-    }
-    Handle<Code> code = info_.code();
-    DCHECK(!code.is_null());
-    DCHECK(code->deoptimization_data() == nullptr ||
-           code->deoptimization_data()->length() == 0);
-    Handle<FixedArray> deopt_data =
-        isolate_->factory()->NewFixedArray(2, TENURED);
-    if (!module_env_->instance->js_object.is_null()) {
-      deopt_data->set(0, *module_env_->instance->js_object);
-    }
-    deopt_data->set(1, Smi::FromInt(function_->func_index));
-    deopt_data->set_length(2);
-    code->set_deoptimization_data(*deopt_data);
-
-    RecordFunctionCompilation(
-        Logger::FUNCTION_TAG, &info_, "WASM_function", function_->func_index,
-        module_env_->module->GetName(function_->name_offset,
-                                     function_->name_length));
-
-    if (FLAG_trace_wasm_decode_time) {
-      double compile_ms = compile_timer.Elapsed().InMillisecondsF();
-      PrintF("wasm-code-generation ok: %d bytes, %0.3f ms code generation\n",
-             static_cast<int>(function_->code_end_offset -
-                              function_->code_start_offset),
-             compile_ms);
-    }
-
-    return code;
+  base::ElapsedTimer pipeline_timer;
+  if (FLAG_trace_wasm_decode_time) {
+    node_count = jsgraph_->graph()->NodeCount();
+    pipeline_timer.Start();
   }
 
-  wasm::ErrorThrower* thrower_;
-  Isolate* isolate_;
-  wasm::ModuleEnv* module_env_;
-  const wasm::WasmFunction* function_;
-  // The graph zone is deallocated at the end of ExecuteCompilation.
-  base::SmartPointer<Zone> graph_zone_;
-  JSGraph* jsgraph_;
-  Zone compilation_zone_;
-  CompilationInfo info_;
-  base::SmartPointer<CompilationJob> job_;
-  uint32_t index_;
-  bool ok_;
-};
+  // Run the compiler pipeline to generate machine code.
+  CallDescriptor* descriptor = wasm::ModuleEnv::GetWasmCallDescriptor(
+      &compilation_zone_, function_->sig);
+  if (jsgraph_->machine()->Is32()) {
+    descriptor =
+        module_env_->GetI32WasmCallDescriptor(&compilation_zone_, descriptor);
+  }
+  job_.Reset(Pipeline::NewWasmCompilationJob(&info_, jsgraph_->graph(),
+                                             descriptor, source_positions));
 
-WasmCompilationUnit* CreateWasmCompilationUnit(
-    wasm::ErrorThrower* thrower, Isolate* isolate, wasm::ModuleEnv* module_env,
-    const wasm::WasmFunction* function, uint32_t index) {
-  return new WasmCompilationUnit(thrower, isolate, module_env, function, index);
+  // The function name {OptimizeGraph()} is misleading but necessary because we
+  // want to use the CompilationJob interface. A better name would be
+  // ScheduleGraphAndSelectInstructions.
+  ok_ = job_->OptimizeGraph() == CompilationJob::SUCCEEDED;
+  // TODO(bradnelson): Improve histogram handling of size_t.
+  // TODO(ahaas): The counters are not thread-safe at the moment.
+  //    isolate_->counters()->wasm_compile_function_peak_memory_bytes()
+  // ->AddSample(
+  //        static_cast<int>(jsgraph->graph()->zone()->allocation_size()));
+
+  if (FLAG_trace_wasm_decode_time) {
+    double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
+    PrintF(
+        "wasm-compilation phase 1 ok: %d bytes, %0.3f ms decode, %zu nodes, "
+        "%0.3f ms pipeline\n",
+        static_cast<int>(function_->code_end_offset -
+                         function_->code_start_offset),
+        decode_ms, node_count, pipeline_ms);
+  }
 }
 
-void ExecuteCompilation(WasmCompilationUnit* unit) {
-  unit->ExecuteCompilation();
-}
+Handle<Code> WasmCompilationUnit::FinishCompilation() {
+  if (!ok_) {
+    if (graph_construction_result_.failed()) {
+      // Add the function as another context for the exception
+      ScopedVector<char> buffer(128);
+      wasm::WasmName name = module_env_->module->GetName(
+          function_->name_offset, function_->name_length);
+      SNPrintF(buffer, "Compiling WASM function #%d:%.*s failed:",
+               function_->func_index, name.length(), name.start());
+      thrower_->Failed(buffer.start(), graph_construction_result_);
+    }
 
-uint32_t GetIndexOfWasmCompilationUnit(WasmCompilationUnit* unit) {
-  return unit->index_;
-}
+    return Handle<Code>::null();
+  }
+  if (job_->GenerateCode() != CompilationJob::SUCCEEDED) {
+    return Handle<Code>::null();
+  }
+  base::ElapsedTimer compile_timer;
+  if (FLAG_trace_wasm_decode_time) {
+    compile_timer.Start();
+  }
+  Handle<Code> code = info_.code();
+  DCHECK(!code.is_null());
 
-Handle<Code> FinishCompilation(WasmCompilationUnit* unit) {
-  Handle<Code> result = unit->FinishCompilation();
-  delete unit;
-  return result;
-}
+  RecordFunctionCompilation(
+      CodeEventListener::FUNCTION_TAG, &info_, "WASM_function",
+      function_->func_index,
+      module_env_->module->GetName(function_->name_offset,
+                                   function_->name_length));
 
-// Helper function to compile a single function.
-Handle<Code> CompileWasmFunction(wasm::ErrorThrower* thrower, Isolate* isolate,
-                                 wasm::ModuleEnv* module_env,
-                                 const wasm::WasmFunction* function) {
-  WasmCompilationUnit* unit =
-      CreateWasmCompilationUnit(thrower, isolate, module_env, function, 0);
-  ExecuteCompilation(unit);
-  return FinishCompilation(unit);
+  if (FLAG_trace_wasm_decode_time) {
+    double compile_ms = compile_timer.Elapsed().InMillisecondsF();
+    PrintF("wasm-code-generation ok: %d bytes, %0.3f ms code generation\n",
+           static_cast<int>(function_->code_end_offset -
+                            function_->code_start_offset),
+           compile_ms);
+  }
+
+  return code;
 }
 
 }  // namespace compiler
diff --git a/src/compiler/wasm-compiler.h b/src/compiler/wasm-compiler.h
index 93c2ae9..c03de3d 100644
--- a/src/compiler/wasm-compiler.h
+++ b/src/compiler/wasm-compiler.h
@@ -7,7 +7,9 @@
 
 // Clients of this interface shouldn't depend on lots of compiler internals.
 // Do not include anything from src/compiler here!
+#include "src/compiler.h"
 #include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-result.h"
 #include "src/zone.h"
 
 namespace v8 {
@@ -20,28 +22,62 @@
 class Graph;
 class Operator;
 class SourcePositionTable;
-class WasmCompilationUnit;
-}
+}  // namespace compiler
 
 namespace wasm {
 // Forward declarations for some WASM data structures.
 struct ModuleEnv;
 struct WasmFunction;
 class ErrorThrower;
+struct Tree;
 
 // Expose {Node} and {Graph} opaquely as {wasm::TFNode} and {wasm::TFGraph}.
 typedef compiler::Node TFNode;
 typedef compiler::JSGraph TFGraph;
-}
+}  // namespace wasm
 
 namespace compiler {
-// Compiles a single function, producing a code object.
-Handle<Code> CompileWasmFunction(wasm::ErrorThrower* thrower, Isolate* isolate,
-                                 wasm::ModuleEnv* module_env,
-                                 const wasm::WasmFunction* function);
+class WasmCompilationUnit final {
+ public:
+  WasmCompilationUnit(wasm::ErrorThrower* thrower, Isolate* isolate,
+                      wasm::ModuleEnv* module_env,
+                      const wasm::WasmFunction* function, uint32_t index);
+
+  Zone* graph_zone() { return graph_zone_.get(); }
+  int index() const { return index_; }
+
+  void ExecuteCompilation();
+  Handle<Code> FinishCompilation();
+
+  static Handle<Code> CompileWasmFunction(wasm::ErrorThrower* thrower,
+                                          Isolate* isolate,
+                                          wasm::ModuleEnv* module_env,
+                                          const wasm::WasmFunction* function) {
+    WasmCompilationUnit unit(thrower, isolate, module_env, function, 0);
+    unit.ExecuteCompilation();
+    return unit.FinishCompilation();
+  }
+
+ private:
+  SourcePositionTable* BuildGraphForWasmFunction(double* decode_ms);
+
+  wasm::ErrorThrower* thrower_;
+  Isolate* isolate_;
+  wasm::ModuleEnv* module_env_;
+  const wasm::WasmFunction* function_;
+  // The graph zone is deallocated at the end of ExecuteCompilation.
+  base::SmartPointer<Zone> graph_zone_;
+  JSGraph* jsgraph_;
+  Zone compilation_zone_;
+  CompilationInfo info_;
+  base::SmartPointer<CompilationJob> job_;
+  uint32_t index_;
+  wasm::Result<wasm::Tree*> graph_construction_result_;
+  bool ok_;
+};
 
 // Wraps a JS function, producing a code object that can be called from WASM.
-Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+Handle<Code> CompileWasmToJSWrapper(Isolate* isolate,
                                     Handle<JSFunction> function,
                                     wasm::FunctionSig* sig,
                                     wasm::WasmName module_name,
@@ -53,16 +89,6 @@
     Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
     Handle<Code> wasm_code, Handle<JSObject> module_object, uint32_t index);
 
-WasmCompilationUnit* CreateWasmCompilationUnit(
-    wasm::ErrorThrower* thrower, Isolate* isolate, wasm::ModuleEnv* module_env,
-    const wasm::WasmFunction* function, uint32_t index);
-
-void ExecuteCompilation(WasmCompilationUnit* unit);
-
-Handle<Code> FinishCompilation(WasmCompilationUnit* unit);
-
-uint32_t GetIndexOfWasmCompilationUnit(WasmCompilationUnit* unit);
-
 // Abstracts details of building TurboFan graph nodes for WASM to separate
 // the WASM decoder from the internal details of TurboFan.
 class WasmTrapHelper;
@@ -141,8 +167,10 @@
   Node* LoadGlobal(uint32_t index);
   Node* StoreGlobal(uint32_t index, Node* val);
   Node* LoadMem(wasm::LocalType type, MachineType memtype, Node* index,
-                uint32_t offset, wasm::WasmCodePosition position);
-  Node* StoreMem(MachineType type, Node* index, uint32_t offset, Node* val,
+                uint32_t offset, uint32_t alignment,
+                wasm::WasmCodePosition position);
+  Node* StoreMem(MachineType type, Node* index, uint32_t offset,
+                 uint32_t alignment, Node* val,
                  wasm::WasmCodePosition position);
 
   static void PrintDebugName(Node* node);
@@ -193,6 +221,19 @@
   void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset,
                       wasm::WasmCodePosition position);
 
+  MachineType GetTypeForUnalignedAccess(uint32_t alignment,
+                                        bool signExtend = false);
+
+  Node* GetUnalignedLoadOffsetNode(Node* baseOffset, int numberOfBytes,
+                                   int stride, int current);
+
+  Node* BuildUnalignedLoad(wasm::LocalType type, MachineType memtype,
+                           Node* index, uint32_t offset, uint32_t alignment);
+  Node* GetUnalignedStoreOffsetNode(Node* baseOffset, int numberOfBytes,
+                                    int stride, int current);
+  Node* BuildUnalignedStore(MachineType memtype, Node* index, uint32_t offset,
+                            uint32_t alignment, Node* val);
+
   Node* MaskShiftCount32(Node* node);
   Node* MaskShiftCount64(Node* node);
 
@@ -234,14 +275,7 @@
 
   Node* BuildF64Acos(Node* input);
   Node* BuildF64Asin(Node* input);
-  Node* BuildF64Atan(Node* input);
-  Node* BuildF64Cos(Node* input);
-  Node* BuildF64Sin(Node* input);
-  Node* BuildF64Tan(Node* input);
-  Node* BuildF64Exp(Node* input);
-  Node* BuildF64Log(Node* input);
   Node* BuildF64Pow(Node* left, Node* right);
-  Node* BuildF64Atan2(Node* left, Node* right);
   Node* BuildF64Mod(Node* left, Node* right);
 
   Node* BuildIntToFloatConversionInstruction(
diff --git a/src/compiler/wasm-linkage.cc b/src/compiler/wasm-linkage.cc
index 41acf55..cfeb6c5 100644
--- a/src/compiler/wasm-linkage.cc
+++ b/src/compiler/wasm-linkage.cc
@@ -4,6 +4,7 @@
 
 #include "src/assembler.h"
 #include "src/macro-assembler.h"
+#include "src/register-configuration.h"
 
 #include "src/wasm/wasm-module.h"
 
@@ -31,6 +32,8 @@
       return MachineType::Float64();
     case kAstF32:
       return MachineType::Float32();
+    case kAstS128:
+      return MachineType::Simd128();
     default:
       UNREACHABLE();
       return MachineType::AnyTagged();
@@ -176,7 +179,18 @@
     if (IsFloatingPoint(type)) {
       // Allocate a floating point register/stack location.
       if (fp_offset < fp_count) {
-        return regloc(fp_regs[fp_offset++]);
+        DoubleRegister reg = fp_regs[fp_offset++];
+#if V8_TARGET_ARCH_ARM
+        // Allocate floats using a double register, but modify the code to
+        // reflect how ARM FP registers alias.
+        // TODO(bbudge) Modify wasm linkage to allow use of all float regs.
+        if (type == kAstF32) {
+          int float_reg_code = reg.code() * 2;
+          DCHECK(float_reg_code < RegisterConfiguration::kMaxFPRegisters);
+          return regloc(DoubleRegister::from_code(float_reg_code));
+        }
+#endif
+        return regloc(reg);
       } else {
         int offset = -1 - stack_offset;
         stack_offset += Words(type);
@@ -197,11 +211,7 @@
     return type == kAstF32 || type == kAstF64;
   }
   int Words(LocalType type) {
-    // The code generation for pushing parameters on the stack does not
-    // distinguish between float32 and float64. Therefore also float32 needs
-    // two words.
-    if (kPointerSize < 8 &&
-        (type == kAstI64 || type == kAstF64 || type == kAstF32)) {
+    if (kPointerSize < 8 && (type == kAstI64 || type == kAstF64)) {
       return 2;
     }
     return 1;
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index a90a584..2ae1fc9 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -18,10 +18,6 @@
 
 #define __ masm()->
 
-
-#define kScratchDoubleReg xmm0
-
-
 // Adds X64 specific methods for decoding operands.
 class X64OperandConverter : public InstructionOperandConverter {
  public:
@@ -45,7 +41,8 @@
       return Immediate(0);
     }
     if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-        constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+        constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE ||
+        constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
       return Immediate(constant.ToInt32(), constant.rmode());
     }
     return Immediate(constant.ToInt32());
@@ -389,24 +386,26 @@
       ool = new (zone()) OutOfLineLoadNaN(this, result);                     \
     } else {                                                                 \
       auto length = i.InputUint32(3);                                        \
+      RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode();       \
       DCHECK_LE(index2, length);                                             \
-      __ cmpl(index1, Immediate(length - index2));                           \
+      __ cmpl(index1, Immediate(length - index2, rmode));                    \
       class OutOfLineLoadFloat final : public OutOfLineCode {                \
        public:                                                               \
         OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result,           \
                            Register buffer, Register index1, int32_t index2, \
-                           int32_t length)                                   \
+                           int32_t length, RelocInfo::Mode rmode)            \
             : OutOfLineCode(gen),                                            \
               result_(result),                                               \
               buffer_(buffer),                                               \
               index1_(index1),                                               \
               index2_(index2),                                               \
-              length_(length) {}                                             \
+              length_(length),                                               \
+              rmode_(rmode) {}                                               \
                                                                              \
         void Generate() final {                                              \
           __ leal(kScratchRegister, Operand(index1_, index2_));              \
           __ Pcmpeqd(result_, result_);                                      \
-          __ cmpl(kScratchRegister, Immediate(length_));                     \
+          __ cmpl(kScratchRegister, Immediate(length_, rmode_));             \
           __ j(above_equal, exit());                                         \
           __ asm_instr(result_,                                              \
                        Operand(buffer_, kScratchRegister, times_1, 0));      \
@@ -418,9 +417,10 @@
         Register const index1_;                                              \
         int32_t const index2_;                                               \
         int32_t const length_;                                               \
+        RelocInfo::Mode rmode_;                                              \
       };                                                                     \
-      ool = new (zone())                                                     \
-          OutOfLineLoadFloat(this, result, buffer, index1, index2, length);  \
+      ool = new (zone()) OutOfLineLoadFloat(this, result, buffer, index1,    \
+                                            index2, length, rmode);          \
     }                                                                        \
     __ j(above_equal, ool->entry());                                         \
     __ asm_instr(result, Operand(buffer, index1, times_1, index2));          \
@@ -441,24 +441,26 @@
       ool = new (zone()) OutOfLineLoadZero(this, result);                      \
     } else {                                                                   \
       auto length = i.InputUint32(3);                                          \
+      RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode();         \
       DCHECK_LE(index2, length);                                               \
-      __ cmpl(index1, Immediate(length - index2));                             \
+      __ cmpl(index1, Immediate(length - index2, rmode));                      \
       class OutOfLineLoadInteger final : public OutOfLineCode {                \
        public:                                                                 \
         OutOfLineLoadInteger(CodeGenerator* gen, Register result,              \
                              Register buffer, Register index1, int32_t index2, \
-                             int32_t length)                                   \
+                             int32_t length, RelocInfo::Mode rmode)            \
             : OutOfLineCode(gen),                                              \
               result_(result),                                                 \
               buffer_(buffer),                                                 \
               index1_(index1),                                                 \
               index2_(index2),                                                 \
-              length_(length) {}                                               \
+              length_(length),                                                 \
+              rmode_(rmode) {}                                                 \
                                                                                \
         void Generate() final {                                                \
           Label oob;                                                           \
           __ leal(kScratchRegister, Operand(index1_, index2_));                \
-          __ cmpl(kScratchRegister, Immediate(length_));                       \
+          __ cmpl(kScratchRegister, Immediate(length_, rmode_));               \
           __ j(above_equal, &oob, Label::kNear);                               \
           __ asm_instr(result_,                                                \
                        Operand(buffer_, kScratchRegister, times_1, 0));        \
@@ -473,9 +475,10 @@
         Register const index1_;                                                \
         int32_t const index2_;                                                 \
         int32_t const length_;                                                 \
+        RelocInfo::Mode const rmode_;                                          \
       };                                                                       \
-      ool = new (zone())                                                       \
-          OutOfLineLoadInteger(this, result, buffer, index1, index2, length);  \
+      ool = new (zone()) OutOfLineLoadInteger(this, result, buffer, index1,    \
+                                              index2, length, rmode);          \
     }                                                                          \
     __ j(above_equal, ool->entry());                                           \
     __ asm_instr(result, Operand(buffer, index1, times_1, index2));            \
@@ -498,23 +501,25 @@
       __ bind(&done);                                                        \
     } else {                                                                 \
       auto length = i.InputUint32(3);                                        \
+      RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode();       \
       DCHECK_LE(index2, length);                                             \
-      __ cmpl(index1, Immediate(length - index2));                           \
+      __ cmpl(index1, Immediate(length - index2, rmode));                    \
       class OutOfLineStoreFloat final : public OutOfLineCode {               \
        public:                                                               \
         OutOfLineStoreFloat(CodeGenerator* gen, Register buffer,             \
                             Register index1, int32_t index2, int32_t length, \
-                            XMMRegister value)                               \
+                            XMMRegister value, RelocInfo::Mode rmode)        \
             : OutOfLineCode(gen),                                            \
               buffer_(buffer),                                               \
               index1_(index1),                                               \
               index2_(index2),                                               \
               length_(length),                                               \
-              value_(value) {}                                               \
+              value_(value),                                                 \
+              rmode_(rmode) {}                                               \
                                                                              \
         void Generate() final {                                              \
           __ leal(kScratchRegister, Operand(index1_, index2_));              \
-          __ cmpl(kScratchRegister, Immediate(length_));                     \
+          __ cmpl(kScratchRegister, Immediate(length_, rmode_));             \
           __ j(above_equal, exit());                                         \
           __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),       \
                        value_);                                              \
@@ -526,9 +531,10 @@
         int32_t const index2_;                                               \
         int32_t const length_;                                               \
         XMMRegister const value_;                                            \
+        RelocInfo::Mode rmode_;                                              \
       };                                                                     \
-      auto ool = new (zone())                                                \
-          OutOfLineStoreFloat(this, buffer, index1, index2, length, value);  \
+      auto ool = new (zone()) OutOfLineStoreFloat(                           \
+          this, buffer, index1, index2, length, value, rmode);               \
       __ j(above_equal, ool->entry());                                       \
       __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
       __ bind(ool->exit());                                                  \
@@ -550,23 +556,25 @@
       __ bind(&done);                                                          \
     } else {                                                                   \
       auto length = i.InputUint32(3);                                          \
+      RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode();         \
       DCHECK_LE(index2, length);                                               \
-      __ cmpl(index1, Immediate(length - index2));                             \
+      __ cmpl(index1, Immediate(length - index2, rmode));                      \
       class OutOfLineStoreInteger final : public OutOfLineCode {               \
        public:                                                                 \
         OutOfLineStoreInteger(CodeGenerator* gen, Register buffer,             \
                               Register index1, int32_t index2, int32_t length, \
-                              Value value)                                     \
+                              Value value, RelocInfo::Mode rmode)              \
             : OutOfLineCode(gen),                                              \
               buffer_(buffer),                                                 \
               index1_(index1),                                                 \
               index2_(index2),                                                 \
               length_(length),                                                 \
-              value_(value) {}                                                 \
+              value_(value),                                                   \
+              rmode_(rmode) {}                                                 \
                                                                                \
         void Generate() final {                                                \
           __ leal(kScratchRegister, Operand(index1_, index2_));                \
-          __ cmpl(kScratchRegister, Immediate(length_));                       \
+          __ cmpl(kScratchRegister, Immediate(length_, rmode_));               \
           __ j(above_equal, exit());                                           \
           __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),         \
                        value_);                                                \
@@ -578,9 +586,10 @@
         int32_t const index2_;                                                 \
         int32_t const length_;                                                 \
         Value const value_;                                                    \
+        RelocInfo::Mode rmode_;                                                \
       };                                                                       \
-      auto ool = new (zone())                                                  \
-          OutOfLineStoreInteger(this, buffer, index1, index2, length, value);  \
+      auto ool = new (zone()) OutOfLineStoreInteger(                           \
+          this, buffer, index1, index2, length, value, rmode);                 \
       __ j(above_equal, ool->entry());                                         \
       __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
       __ bind(ool->exit());                                                    \
@@ -598,6 +607,20 @@
     }                                                            \
   } while (false)
 
+#define ASSEMBLE_IEEE754_BINOP(name)                                          \
+  do {                                                                        \
+    __ PrepareCallCFunction(2);                                               \
+    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+                     2);                                                      \
+  } while (false)
+
+#define ASSEMBLE_IEEE754_UNOP(name)                                           \
+  do {                                                                        \
+    __ PrepareCallCFunction(1);                                               \
+    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+                     1);                                                      \
+  } while (false)
+
 void CodeGenerator::AssembleDeconstructFrame() {
   __ movq(rsp, rbp);
   __ popq(rbp);
@@ -763,6 +786,14 @@
     case kArchTableSwitch:
       AssembleArchTableSwitch(instr);
       break;
+    case kArchComment: {
+      Address comment_string = i.InputExternalReference(0).address();
+      __ RecordComment(reinterpret_cast<const char*>(comment_string));
+      break;
+    }
+    case kArchDebugBreak:
+      __ int3();
+      break;
     case kArchNop:
     case kArchThrowTerminator:
       // don't emit code for nops.
@@ -836,6 +867,45 @@
       __ leaq(i.OutputRegister(), Operand(base, offset.offset()));
       break;
     }
+    case kIeee754Float64Atan:
+      ASSEMBLE_IEEE754_UNOP(atan);
+      break;
+    case kIeee754Float64Atan2:
+      ASSEMBLE_IEEE754_BINOP(atan2);
+      break;
+    case kIeee754Float64Atanh:
+      ASSEMBLE_IEEE754_UNOP(atanh);
+      break;
+    case kIeee754Float64Cbrt:
+      ASSEMBLE_IEEE754_UNOP(cbrt);
+      break;
+    case kIeee754Float64Cos:
+      ASSEMBLE_IEEE754_UNOP(cos);
+      break;
+    case kIeee754Float64Exp:
+      ASSEMBLE_IEEE754_UNOP(exp);
+      break;
+    case kIeee754Float64Expm1:
+      ASSEMBLE_IEEE754_UNOP(expm1);
+      break;
+    case kIeee754Float64Log:
+      ASSEMBLE_IEEE754_UNOP(log);
+      break;
+    case kIeee754Float64Log1p:
+      ASSEMBLE_IEEE754_UNOP(log1p);
+      break;
+    case kIeee754Float64Log2:
+      ASSEMBLE_IEEE754_UNOP(log2);
+      break;
+    case kIeee754Float64Log10:
+      ASSEMBLE_IEEE754_UNOP(log10);
+      break;
+    case kIeee754Float64Sin:
+      ASSEMBLE_IEEE754_UNOP(sin);
+      break;
+    case kIeee754Float64Tan:
+      ASSEMBLE_IEEE754_UNOP(tan);
+      break;
     case kX64Add32:
       ASSEMBLE_BINOP(addl);
       break;
@@ -1528,6 +1598,10 @@
       }
       break;
     }
+    case kSSEFloat64SilenceNaN:
+      __ Xorpd(kScratchDoubleReg, kScratchDoubleReg);
+      __ Subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
+      break;
     case kX64Movsxbl:
       ASSEMBLE_MOVX(movsxbl);
       __ AssertZeroExtended(i.OutputRegister());
@@ -2134,7 +2208,8 @@
                                                : kScratchRegister;
       switch (src.type()) {
         case Constant::kInt32: {
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
             __ movq(dst, src.ToInt64(), src.rmode());
           } else {
             // TODO(dcarney): don't need scratch in this case.
@@ -2148,7 +2223,8 @@
           break;
         }
         case Constant::kInt64:
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
             __ movq(dst, src.ToInt64(), src.rmode());
           } else {
             DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
@@ -2224,10 +2300,9 @@
       XMMRegister dst = g.ToDoubleRegister(destination);
       __ Movsd(dst, src);
     } else {
-      // We rely on having xmm0 available as a fixed scratch register.
       Operand dst = g.ToOperand(destination);
-      __ Movsd(xmm0, src);
-      __ Movsd(dst, xmm0);
+      __ Movsd(kScratchDoubleReg, src);
+      __ Movsd(dst, kScratchDoubleReg);
     }
   } else {
     UNREACHABLE();
@@ -2271,21 +2346,19 @@
     dst = g.ToOperand(destination);
     __ popq(dst);
   } else if (source->IsFPRegister() && destination->IsFPRegister()) {
-    // XMM register-register swap. We rely on having xmm0
-    // available as a fixed scratch register.
+    // XMM register-register swap.
     XMMRegister src = g.ToDoubleRegister(source);
     XMMRegister dst = g.ToDoubleRegister(destination);
-    __ Movapd(xmm0, src);
+    __ Movapd(kScratchDoubleReg, src);
     __ Movapd(src, dst);
-    __ Movapd(dst, xmm0);
+    __ Movapd(dst, kScratchDoubleReg);
   } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
-    // XMM register-memory swap.  We rely on having xmm0
-    // available as a fixed scratch register.
+    // XMM register-memory swap.
     XMMRegister src = g.ToDoubleRegister(source);
     Operand dst = g.ToOperand(destination);
-    __ Movsd(xmm0, src);
+    __ Movsd(kScratchDoubleReg, src);
     __ Movsd(src, dst);
-    __ Movsd(dst, xmm0);
+    __ Movsd(dst, kScratchDoubleReg);
   } else {
     // No other combinations are possible.
     UNREACHABLE();
diff --git a/src/compiler/x64/instruction-codes-x64.h b/src/compiler/x64/instruction-codes-x64.h
index 638e77b..29acee3 100644
--- a/src/compiler/x64/instruction-codes-x64.h
+++ b/src/compiler/x64/instruction-codes-x64.h
@@ -102,6 +102,7 @@
   V(SSEFloat64InsertLowWord32)     \
   V(SSEFloat64InsertHighWord32)    \
   V(SSEFloat64LoadLowWord32)       \
+  V(SSEFloat64SilenceNaN)          \
   V(AVXFloat32Cmp)                 \
   V(AVXFloat32Add)                 \
   V(AVXFloat32Sub)                 \
diff --git a/src/compiler/x64/instruction-scheduler-x64.cc b/src/compiler/x64/instruction-scheduler-x64.cc
index 6133bd8..eecefdb 100644
--- a/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/src/compiler/x64/instruction-scheduler-x64.cc
@@ -104,6 +104,7 @@
     case kSSEFloat64InsertLowWord32:
     case kSSEFloat64InsertHighWord32:
     case kSSEFloat64LoadLowWord32:
+    case kSSEFloat64SilenceNaN:
     case kAVXFloat32Cmp:
     case kAVXFloat32Add:
     case kAVXFloat32Sub:
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index 47deb02..be56dce 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -1355,7 +1355,6 @@
   VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
 }
 
-
 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
   VisitRO(this, node, kSSEFloat64Sqrt);
 }
@@ -1405,6 +1404,24 @@
   VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
 }
 
+void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+                                                   InstructionCode opcode) {
+  X64OperandGenerator g(this);
+  Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0),
+       g.UseFixed(node->InputAt(1), xmm1))
+      ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+                                                  InstructionCode opcode) {
+  X64OperandGenerator g(this);
+  Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0))
+      ->MarkAsCall();
+}
 
 void InstructionSelector::EmitPrepareArguments(
     ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1437,7 +1454,7 @@
           g.CanBeImmediate(input.node())
               ? g.UseImmediate(input.node())
               : IsSupported(ATOM) ||
-                        sequence()->IsFloat(GetVirtualRegister(input.node()))
+                        sequence()->IsFP(GetVirtualRegister(input.node()))
                     ? g.UseRegister(input.node())
                     : g.Use(input.node());
       Emit(kX64Push, g.NoOutput(), value);
@@ -2036,6 +2053,12 @@
        g.UseRegister(left), g.Use(right));
 }
 
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
 void InstructionSelector::VisitAtomicLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
@@ -2110,6 +2133,13 @@
   return flags;
 }
 
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+  return MachineOperatorBuilder::AlignmentRequirements::
+      FullUnalignedAccessSupport();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/x87/code-generator-x87.cc b/src/compiler/x87/code-generator-x87.cc
index 0eef24f..6bacda0 100644
--- a/src/compiler/x87/code-generator-x87.cc
+++ b/src/compiler/x87/code-generator-x87.cc
@@ -61,6 +61,7 @@
     Constant constant = ToConstant(operand);
     if (constant.type() == Constant::kInt32 &&
         (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+         constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
          constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE)) {
       return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
                        constant.rmode());
@@ -113,8 +114,8 @@
       }
       case kMode_MRI: {
         Register base = InputRegister(NextOffset(offset));
-        int32_t disp = InputInt32(NextOffset(offset));
-        return Operand(base, disp);
+        Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+        return Operand(base, ctant.ToInt32(), ctant.rmode());
       }
       case kMode_MR1:
       case kMode_MR2:
@@ -133,8 +134,8 @@
         Register base = InputRegister(NextOffset(offset));
         Register index = InputRegister(NextOffset(offset));
         ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
-        int32_t disp = InputInt32(NextOffset(offset));
-        return Operand(base, index, scale, disp);
+        Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+        return Operand(base, index, scale, ctant.ToInt32(), ctant.rmode());
       }
       case kMode_M1:
       case kMode_M2:
@@ -151,12 +152,12 @@
       case kMode_M8I: {
         Register index = InputRegister(NextOffset(offset));
         ScaleFactor scale = ScaleFor(kMode_M1I, mode);
-        int32_t disp = InputInt32(NextOffset(offset));
-        return Operand(index, scale, disp);
+        Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+        return Operand(index, scale, ctant.ToInt32(), ctant.rmode());
       }
       case kMode_MI: {
-        int32_t disp = InputInt32(NextOffset(offset));
-        return Operand(Immediate(disp));
+        Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+        return Operand(ctant.ToInt32(), ctant.rmode());
       }
       case kMode_None:
         UNREACHABLE();
@@ -370,6 +371,50 @@
     }                                                                 \
   } while (0)
 
+#define ASSEMBLE_IEEE754_BINOP(name)                                          \
+  do {                                                                        \
+    /* Saves the esp into ebx */                                              \
+    __ push(ebx);                                                             \
+    __ mov(ebx, esp);                                                         \
+    /* Pass one double as argument on the stack. */                           \
+    __ PrepareCallCFunction(4, eax);                                          \
+    __ fstp(0);                                                               \
+    /* Load first operand from original stack */                              \
+    __ fld_d(MemOperand(ebx, 4 + kDoubleSize));                               \
+    /* Put first operand into stack for function call */                      \
+    __ fstp_d(Operand(esp, 0 * kDoubleSize));                                 \
+    /* Load second operand from original stack */                             \
+    __ fld_d(MemOperand(ebx, 4));                                             \
+    /* Put second operand into stack for function call */                     \
+    __ fstp_d(Operand(esp, 1 * kDoubleSize));                                 \
+    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+                     4);                                                      \
+    /* Restore the ebx */                                                     \
+    __ pop(ebx);                                                              \
+    /* Return value is in st(0) on x87. */                                    \
+    __ lea(esp, Operand(esp, 2 * kDoubleSize));                               \
+  } while (false)
+
+#define ASSEMBLE_IEEE754_UNOP(name)                                           \
+  do {                                                                        \
+    /* Saves the esp into ebx */                                              \
+    __ push(ebx);                                                             \
+    __ mov(ebx, esp);                                                         \
+    /* Pass one double as argument on the stack. */                           \
+    __ PrepareCallCFunction(2, eax);                                          \
+    __ fstp(0);                                                               \
+    /* Load operand from original stack */                                    \
+    __ fld_d(MemOperand(ebx, 4));                                             \
+    /* Put operand into stack for function call */                            \
+    __ fstp_d(Operand(esp, 0));                                               \
+    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+                     2);                                                      \
+    /* Restore the ebx */                                                     \
+    __ pop(ebx);                                                              \
+    /* Return value is in st(0) on x87. */                                    \
+    __ lea(esp, Operand(esp, kDoubleSize));                                   \
+  } while (false)
+
 void CodeGenerator::AssembleDeconstructFrame() {
   __ mov(esp, ebp);
   __ pop(ebp);
@@ -606,6 +651,14 @@
     case kArchTableSwitch:
       AssembleArchTableSwitch(instr);
       break;
+    case kArchComment: {
+      Address comment_string = i.InputExternalReference(0).address();
+      __ RecordComment(reinterpret_cast<const char*>(comment_string));
+      break;
+    }
+    case kArchDebugBreak:
+      __ int3();
+      break;
     case kArchNop:
     case kArchThrowTerminator:
       // don't emit code for nops.
@@ -695,6 +748,53 @@
       __ lea(i.OutputRegister(), Operand(base, offset.offset()));
       break;
     }
+    case kIeee754Float64Atan:
+      ASSEMBLE_IEEE754_UNOP(atan);
+      break;
+    case kIeee754Float64Atan2:
+      ASSEMBLE_IEEE754_BINOP(atan2);
+      break;
+    case kIeee754Float64Cbrt:
+      ASSEMBLE_IEEE754_UNOP(cbrt);
+      break;
+    case kIeee754Float64Cos:
+      __ X87SetFPUCW(0x027F);
+      ASSEMBLE_IEEE754_UNOP(cos);
+      __ X87SetFPUCW(0x037F);
+      break;
+    case kIeee754Float64Expm1:
+      __ X87SetFPUCW(0x027F);
+      ASSEMBLE_IEEE754_UNOP(expm1);
+      __ X87SetFPUCW(0x037F);
+      break;
+    case kIeee754Float64Exp:
+      ASSEMBLE_IEEE754_UNOP(exp);
+      break;
+    case kIeee754Float64Atanh:
+      ASSEMBLE_IEEE754_UNOP(atanh);
+      break;
+    case kIeee754Float64Log:
+      ASSEMBLE_IEEE754_UNOP(log);
+      break;
+    case kIeee754Float64Log1p:
+      ASSEMBLE_IEEE754_UNOP(log1p);
+      break;
+    case kIeee754Float64Log2:
+      ASSEMBLE_IEEE754_UNOP(log2);
+      break;
+    case kIeee754Float64Log10:
+      ASSEMBLE_IEEE754_UNOP(log10);
+      break;
+    case kIeee754Float64Sin:
+      __ X87SetFPUCW(0x027F);
+      ASSEMBLE_IEEE754_UNOP(sin);
+      __ X87SetFPUCW(0x037F);
+      break;
+    case kIeee754Float64Tan:
+      __ X87SetFPUCW(0x027F);
+      ASSEMBLE_IEEE754_UNOP(tan);
+      __ X87SetFPUCW(0x037F);
+      break;
     case kX87Add:
       if (HasImmediateInput(instr, 1)) {
         __ add(i.InputOperand(0), i.InputImmediate(1));
@@ -1523,6 +1623,30 @@
       __ lea(esp, Operand(esp, 2 * kDoubleSize));
       break;
     }
+    case kX87Float64SilenceNaN: {
+      Label end, return_qnan;
+      __ fstp(0);
+      __ push(ebx);
+      // Load Half word of HoleNan(SNaN) into ebx
+      __ mov(ebx, MemOperand(esp, 2 * kInt32Size));
+      __ cmp(ebx, Immediate(kHoleNanUpper32));
+      // Check input is HoleNaN(SNaN)?
+      __ j(equal, &return_qnan, Label::kNear);
+      // If input isn't HoleNaN(SNaN), just load it and return
+      __ fld_d(MemOperand(esp, 1 * kInt32Size));
+      __ jmp(&end);
+      __ bind(&return_qnan);
+      // If input is HoleNaN(SNaN), Return QNaN
+      __ push(Immediate(0xffffffff));
+      __ push(Immediate(0xfff7ffff));
+      __ fld_d(MemOperand(esp, 0));
+      __ lea(esp, Operand(esp, kDoubleSize));
+      __ bind(&end);
+      __ pop(ebx);
+      // Clear stack.
+      __ lea(esp, Operand(esp, 1 * kDoubleSize));
+      break;
+    }
     case kX87Movsxbl:
       __ movsx_b(i.OutputRegister(), i.MemoryOperand());
       break;
@@ -1661,27 +1785,29 @@
       if (instr->InputAt(0)->IsFPRegister()) {
         auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
         if (allocated.representation() == MachineRepresentation::kFloat32) {
-          __ sub(esp, Immediate(kDoubleSize));
+          __ sub(esp, Immediate(kFloatSize));
           __ fst_s(Operand(esp, 0));
+          frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
         } else {
           DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
           __ sub(esp, Immediate(kDoubleSize));
           __ fst_d(Operand(esp, 0));
-        }
         frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+        }
       } else if (instr->InputAt(0)->IsFPStackSlot()) {
         auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
         if (allocated.representation() == MachineRepresentation::kFloat32) {
-          __ sub(esp, Immediate(kDoubleSize));
+          __ sub(esp, Immediate(kFloatSize));
           __ fld_s(i.InputOperand(0));
           __ fstp_s(MemOperand(esp, 0));
+          frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
         } else {
           DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
           __ sub(esp, Immediate(kDoubleSize));
           __ fld_d(i.InputOperand(0));
           __ fstp_d(MemOperand(esp, 0));
-        }
         frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+        }
       } else if (HasImmediateInput(instr, 0)) {
         __ push(i.InputImmediate(0));
         frame_access_state()->IncreaseSPDelta(1);
diff --git a/src/compiler/x87/instruction-codes-x87.h b/src/compiler/x87/instruction-codes-x87.h
index 0cf9f35..2b4be3e 100644
--- a/src/compiler/x87/instruction-codes-x87.h
+++ b/src/compiler/x87/instruction-codes-x87.h
@@ -80,6 +80,7 @@
   V(X87Float64Sqrt)                \
   V(X87Float64Round)               \
   V(X87Float64Cmp)                 \
+  V(X87Float64SilenceNaN)          \
   V(X87Movsxbl)                    \
   V(X87Movzxbl)                    \
   V(X87Movb)                       \
diff --git a/src/compiler/x87/instruction-selector-x87.cc b/src/compiler/x87/instruction-selector-x87.cc
index a99e7a6..45779c7 100644
--- a/src/compiler/x87/instruction-selector-x87.cc
+++ b/src/compiler/x87/instruction-selector-x87.cc
@@ -1009,7 +1009,6 @@
   Emit(kX87Float64Abs, g.DefineAsFixed(node, stX_0), 0, nullptr);
 }
 
-
 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
   X87OperandGenerator g(this);
   Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
@@ -1084,6 +1083,24 @@
        g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
 }
 
+void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+                                                   InstructionCode opcode) {
+  X87OperandGenerator g(this);
+  Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+  Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+  Emit(opcode, g.DefineAsFixed(node, stX_0), 0, nullptr)->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+                                                  InstructionCode opcode) {
+  X87OperandGenerator g(this);
+  Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+  Emit(opcode, g.DefineAsFixed(node, stX_0), 0, nullptr)->MarkAsCall();
+}
 
 void InstructionSelector::EmitPrepareArguments(
     ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1118,7 +1135,7 @@
           g.CanBeImmediate(input.node())
               ? g.UseImmediate(input.node())
               : IsSupported(ATOM) ||
-                        sequence()->IsFloat(GetVirtualRegister(input.node()))
+                        sequence()->IsFP(GetVirtualRegister(input.node()))
                     ? g.UseRegister(input.node())
                     : g.Use(input.node());
       Emit(kX87Push, g.NoOutput(), value);
@@ -1612,6 +1629,12 @@
        g.UseRegister(left), g.UseRegister(right));
 }
 
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+  X87OperandGenerator g(this);
+  Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+  Emit(kX87Float64SilenceNaN, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
 void InstructionSelector::VisitAtomicLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
@@ -1683,6 +1706,13 @@
   return flags;
 }
 
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+  return MachineOperatorBuilder::AlignmentRequirements::
+      FullUnalignedAccessSupport();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/contexts-inl.h b/src/contexts-inl.h
index 5d62a04..990b50e 100644
--- a/src/contexts-inl.h
+++ b/src/contexts-inl.h
@@ -58,7 +58,7 @@
 
 Object* Context::next_context_link() { return get(Context::NEXT_CONTEXT_LINK); }
 
-bool Context::has_extension() { return !extension()->IsTheHole(); }
+bool Context::has_extension() { return !extension()->IsTheHole(GetIsolate()); }
 HeapObject* Context::extension() {
   return HeapObject::cast(get(EXTENSION_INDEX));
 }
diff --git a/src/contexts.cc b/src/contexts.cc
index 392a3cc..aa46b47 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -92,7 +92,7 @@
 JSObject* Context::extension_object() {
   DCHECK(IsNativeContext() || IsFunctionContext() || IsBlockContext());
   HeapObject* object = extension();
-  if (object->IsTheHole()) return nullptr;
+  if (object->IsTheHole(GetIsolate())) return nullptr;
   if (IsBlockContext()) {
     if (!object->IsSloppyBlockWithEvalContextExtension()) return nullptr;
     object = SloppyBlockWithEvalContextExtension::cast(object)->extension();
@@ -443,10 +443,11 @@
 
 void Context::AddOptimizedFunction(JSFunction* function) {
   DCHECK(IsNativeContext());
+  Isolate* isolate = GetIsolate();
 #ifdef ENABLE_SLOW_DCHECKS
   if (FLAG_enable_slow_asserts) {
     Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
-    while (!element->IsUndefined()) {
+    while (!element->IsUndefined(isolate)) {
       CHECK(element != function);
       element = JSFunction::cast(element)->next_function_link();
     }
@@ -454,8 +455,8 @@
 
   // Check that the context belongs to the weak native contexts list.
   bool found = false;
-  Object* context = GetHeap()->native_contexts_list();
-  while (!context->IsUndefined()) {
+  Object* context = isolate->heap()->native_contexts_list();
+  while (!context->IsUndefined(isolate)) {
     if (context == this) {
       found = true;
       break;
@@ -467,12 +468,12 @@
 
   // If the function link field is already used then the function was
   // enqueued as a code flushing candidate and we remove it now.
-  if (!function->next_function_link()->IsUndefined()) {
+  if (!function->next_function_link()->IsUndefined(isolate)) {
     CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
     flusher->EvictCandidate(function);
   }
 
-  DCHECK(function->next_function_link()->IsUndefined());
+  DCHECK(function->next_function_link()->IsUndefined(isolate));
 
   function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST),
                                    UPDATE_WEAK_WRITE_BARRIER);
@@ -484,9 +485,10 @@
   DCHECK(IsNativeContext());
   Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
   JSFunction* prev = NULL;
-  while (!element->IsUndefined()) {
+  Isolate* isolate = function->GetIsolate();
+  while (!element->IsUndefined(isolate)) {
     JSFunction* element_function = JSFunction::cast(element);
-    DCHECK(element_function->next_function_link()->IsUndefined() ||
+    DCHECK(element_function->next_function_link()->IsUndefined(isolate) ||
            element_function->next_function_link()->IsJSFunction());
     if (element_function == function) {
       if (prev == NULL) {
@@ -522,7 +524,7 @@
 void Context::AddOptimizedCode(Code* code) {
   DCHECK(IsNativeContext());
   DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
-  DCHECK(code->next_code_link()->IsUndefined());
+  DCHECK(code->next_code_link()->IsUndefined(GetIsolate()));
   code->set_next_code_link(get(OPTIMIZED_CODE_LIST));
   set(OPTIMIZED_CODE_LIST, code, UPDATE_WEAK_WRITE_BARRIER);
 }
@@ -555,7 +557,7 @@
 Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() {
   Isolate* isolate = GetIsolate();
   Handle<Object> result(error_message_for_code_gen_from_strings(), isolate);
-  if (!result->IsUndefined()) return result;
+  if (!result->IsUndefined(isolate)) return result;
   return isolate->factory()->NewStringFromStaticChars(
       "Code generation from strings disallowed for this context");
 }
diff --git a/src/contexts.h b/src/contexts.h
index 1161885..78ec431 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -86,8 +86,10 @@
   V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
   V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments)               \
   V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable)                 \
-  V(MATH_FLOOR, JSFunction, math_floor)                                 \
-  V(MATH_SQRT, JSFunction, math_sqrt)
+  V(MATH_EXP_INDEX, JSFunction, math_exp)                               \
+  V(MATH_FLOOR_INDEX, JSFunction, math_floor)                           \
+  V(MATH_LOG_INDEX, JSFunction, math_log)                               \
+  V(MATH_SQRT_INDEX, JSFunction, math_sqrt)
 
 #define NATIVE_CONTEXT_IMPORTED_FIELDS(V)                                   \
   V(ARRAY_CONCAT_INDEX, JSFunction, array_concat)                           \
@@ -104,7 +106,6 @@
   V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function)             \
   V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun)       \
   V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun)                     \
-  V(JSON_SERIALIZE_ADAPTER_INDEX, JSFunction, json_serialize_adapter)       \
   V(MAKE_ERROR_FUNCTION_INDEX, JSFunction, make_error_function)             \
   V(MAP_DELETE_METHOD_INDEX, JSFunction, map_delete)                        \
   V(MAP_GET_METHOD_INDEX, JSFunction, map_get)                              \
@@ -216,6 +217,7 @@
   V(NORMALIZED_MAP_CACHE_INDEX, Object, normalized_map_cache)                  \
   V(NUMBER_FUNCTION_INDEX, JSFunction, number_function)                        \
   V(OBJECT_FUNCTION_INDEX, JSFunction, object_function)                        \
+  V(OBJECT_WITH_NULL_PROTOTYPE_MAP, Map, object_with_null_prototype_map)       \
   V(OBJECT_FUNCTION_PROTOTYPE_MAP_INDEX, Map, object_function_prototype_map)   \
   V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function)    \
   V(PROXY_CALLABLE_MAP_INDEX, Map, proxy_callable_map)                         \
@@ -238,6 +240,10 @@
   V(SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map,                    \
     sloppy_function_with_readonly_prototype_map)                               \
   V(WASM_FUNCTION_MAP_INDEX, Map, wasm_function_map)                           \
+  V(WASM_MODULE_CONSTRUCTOR_INDEX, JSFunction, wasm_module_constructor)        \
+  V(WASM_INSTANCE_CONSTRUCTOR_INDEX, JSFunction, wasm_instance_constructor)    \
+  V(WASM_MODULE_SYM_INDEX, Symbol, wasm_module_sym)                            \
+  V(WASM_INSTANCE_SYM_INDEX, Symbol, wasm_instance_sym)                        \
   V(SLOPPY_ASYNC_FUNCTION_MAP_INDEX, Map, sloppy_async_function_map)           \
   V(SLOPPY_GENERATOR_FUNCTION_MAP_INDEX, Map, sloppy_generator_function_map)   \
   V(SLOW_ALIASED_ARGUMENTS_MAP_INDEX, Map, slow_aliased_arguments_map)         \
@@ -250,6 +256,8 @@
   V(STRING_FUNCTION_INDEX, JSFunction, string_function)                        \
   V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map)   \
   V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function)                        \
+  V(TYPED_ARRAY_FUN_INDEX, JSFunction, typed_array_function)                   \
+  V(TYPED_ARRAY_PROTOTYPE_INDEX, JSObject, typed_array_prototype)              \
   V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun)                      \
   V(UINT16X8_FUNCTION_INDEX, JSFunction, uint16x8_function)                    \
   V(UINT32_ARRAY_FUN_INDEX, JSFunction, uint32_array_fun)                      \
diff --git a/src/counters.cc b/src/counters.cc
index 0dd62a0..57dad3d 100644
--- a/src/counters.cc
+++ b/src/counters.cc
@@ -9,6 +9,7 @@
 #include "src/base/platform/platform.h"
 #include "src/isolate.h"
 #include "src/log-inl.h"
+#include "src/log.h"
 
 namespace v8 {
 namespace internal {
@@ -200,14 +201,14 @@
   void Print(std::ostream& os) {
     if (total_call_count == 0) return;
     std::sort(entries.rbegin(), entries.rend());
-    os << std::setw(50) << "Runtime Function/C++ Builtin" << std::setw(10)
+    os << std::setw(50) << "Runtime Function/C++ Builtin" << std::setw(12)
        << "Time" << std::setw(18) << "Count" << std::endl
-       << std::string(86, '=') << std::endl;
+       << std::string(88, '=') << std::endl;
     for (Entry& entry : entries) {
       entry.SetTotal(total_time, total_call_count);
       entry.Print(os);
     }
-    os << std::string(86, '-') << std::endl;
+    os << std::string(88, '-') << std::endl;
     Entry("Total", total_time, total_call_count).Print(os);
   }
 
@@ -223,7 +224,7 @@
    public:
     Entry(const char* name, base::TimeDelta time, uint64_t count)
         : name_(name),
-          time_(time.InMilliseconds()),
+          time_(time.InMicroseconds()),
           count_(count),
           time_percent_(100),
           count_percent_(100) {}
@@ -236,9 +237,9 @@
 
     void Print(std::ostream& os) {
       os.precision(2);
-      os << std::fixed;
+      os << std::fixed << std::setprecision(2);
       os << std::setw(50) << name_;
-      os << std::setw(8) << time_ << "ms ";
+      os << std::setw(10) << static_cast<double>(time_) / 1000 << "ms ";
       os << std::setw(6) << time_percent_ << "%";
       os << std::setw(10) << count_ << " ";
       os << std::setw(6) << count_percent_ << "%";
@@ -246,10 +247,10 @@
     }
 
     void SetTotal(base::TimeDelta total_time, uint64_t total_count) {
-      if (total_time.InMilliseconds() == 0) {
+      if (total_time.InMicroseconds() == 0) {
         time_percent_ = 0;
       } else {
-        time_percent_ = 100.0 * time_ / total_time.InMilliseconds();
+        time_percent_ = 100.0 * time_ / total_time.InMicroseconds();
       }
       count_percent_ = 100.0 * count_ / total_count;
     }
@@ -284,8 +285,17 @@
 // static
 void RuntimeCallStats::Leave(Isolate* isolate, RuntimeCallTimer* timer) {
   RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
-  DCHECK_EQ(stats->current_timer_, timer);
-  stats->current_timer_ = timer->Stop();
+
+  if (stats->current_timer_ == timer) {
+    stats->current_timer_ = timer->Stop();
+  } else {
+    // Must be a Threading cctest. Walk the chain of Timers to find the
+    // buried one that's leaving. We don't care about keeping nested timings
+    // accurate, just avoid crashing by keeping the chain intact.
+    RuntimeCallTimer* next = stats->current_timer_;
+    while (next->parent_ != timer) next = next->parent_;
+    next->parent_ = timer->Stop();
+  }
 }
 
 // static
@@ -308,7 +318,7 @@
   FOR_EACH_INTRINSIC(PRINT_COUNTER)
 #undef PRINT_COUNTER
 
-#define PRINT_COUNTER(name, type) entries.Add(&this->Builtin_##name);
+#define PRINT_COUNTER(name) entries.Add(&this->Builtin_##name);
   BUILTIN_LIST_C(PRINT_COUNTER)
 #undef PRINT_COUNTER
 
@@ -333,7 +343,7 @@
   FOR_EACH_INTRINSIC(RESET_COUNTER)
 #undef RESET_COUNTER
 
-#define RESET_COUNTER(name, type) this->Builtin_##name.Reset();
+#define RESET_COUNTER(name) this->Builtin_##name.Reset();
   BUILTIN_LIST_C(RESET_COUNTER)
 #undef RESET_COUNTER
 
diff --git a/src/counters.h b/src/counters.h
index a61cacf..3c82a18 100644
--- a/src/counters.h
+++ b/src/counters.h
@@ -493,6 +493,8 @@
 class RuntimeCallTimer {
  public:
   RuntimeCallTimer() {}
+  RuntimeCallCounter* counter() { return counter_; }
+  base::ElapsedTimer timer() { return timer_; }
 
  private:
   friend class RuntimeCallStats;
@@ -660,6 +662,7 @@
   V(AccessorNameSetterCallback)                     \
   V(Compile)                                        \
   V(CompileCode)                                    \
+  V(CompileCodeLazy)                                \
   V(CompileDeserialize)                             \
   V(CompileEval)                                    \
   V(CompileFullCode)                                \
@@ -706,7 +709,6 @@
   V(KeyedStoreIC_StoreElementStub)              \
   V(KeyedStoreIC_Polymorphic)                   \
   V(LoadIC_FunctionPrototypeStub)               \
-  V(LoadIC_ArrayBufferViewLoadFieldStub)        \
   V(LoadIC_LoadApiGetterStub)                   \
   V(LoadIC_LoadCallback)                        \
   V(LoadIC_LoadConstant)                        \
@@ -745,7 +747,7 @@
   RuntimeCallCounter Runtime_##name = RuntimeCallCounter(#name);
   FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER)
 #undef CALL_RUNTIME_COUNTER
-#define CALL_BUILTIN_COUNTER(name, type) \
+#define CALL_BUILTIN_COUNTER(name) \
   RuntimeCallCounter Builtin_##name = RuntimeCallCounter(#name);
   BUILTIN_LIST_C(CALL_BUILTIN_COUNTER)
 #undef CALL_BUILTIN_COUNTER
@@ -776,6 +778,7 @@
   void Print(std::ostream& os);
 
   RuntimeCallStats() { Reset(); }
+  RuntimeCallTimer* current_timer() { return current_timer_; }
 
  private:
   // Counter to track recursive time events.
@@ -1015,7 +1018,6 @@
   SC(regexp_entry_native, V8.RegExpEntryNative)                                \
   SC(number_to_string_native, V8.NumberToStringNative)                         \
   SC(number_to_string_runtime, V8.NumberToStringRuntime)                       \
-  SC(math_atan2_runtime, V8.MathAtan2Runtime)                                  \
   SC(math_exp_runtime, V8.MathExpRuntime)                                      \
   SC(math_log_runtime, V8.MathLogRuntime)                                      \
   SC(math_pow_runtime, V8.MathPowRuntime)                                      \
diff --git a/src/crankshaft/arm/lithium-arm.cc b/src/crankshaft/arm/lithium-arm.cc
index d8ee9cd..538b309 100644
--- a/src/crankshaft/arm/lithium-arm.cc
+++ b/src/crankshaft/arm/lithium-arm.cc
@@ -1069,6 +1069,10 @@
       return DoMathAbs(instr);
     case kMathLog:
       return DoMathLog(instr);
+    case kMathCos:
+      return DoMathCos(instr);
+    case kMathSin:
+      return DoMathSin(instr);
     case kMathExp:
       return DoMathExp(instr);
     case kMathSqrt:
@@ -1134,16 +1138,25 @@
   return DefineAsRegister(result);
 }
 
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseFixedDouble(instr->value(), d0);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), d0), instr);
+}
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseFixedDouble(instr->value(), d0);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), d0), instr);
+}
 
 LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
   DCHECK(instr->representation().IsDouble());
   DCHECK(instr->value()->representation().IsDouble());
-  LOperand* input = UseRegister(instr->value());
-  LOperand* temp1 = TempRegister();
-  LOperand* temp2 = TempRegister();
-  LOperand* double_temp = TempDoubleRegister();
-  LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
-  return DefineAsRegister(result);
+  LOperand* input = UseFixedDouble(instr->value(), d0);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), d0), instr);
 }
 
 
@@ -1974,13 +1987,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
-  LOperand* lo = UseRegister(instr->lo());
-  LOperand* hi = UseRegister(instr->hi());
-  return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
-}
-
-
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
   LOperand* context = info()->IsStub()
       ? UseFixed(instr->context(), cp)
@@ -2012,14 +2018,9 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* global_object =
-      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
-  LLoadGlobalGeneric* result =
-      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+
+  LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
@@ -2063,10 +2064,7 @@
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
 
   LInstruction* result =
       DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), r0);
@@ -2138,10 +2136,7 @@
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
 
   LInstruction* result =
       DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
@@ -2203,12 +2198,8 @@
   DCHECK(instr->key()->representation().IsTagged());
   DCHECK(instr->value()->representation().IsTagged());
 
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
-    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
-  }
+  LOperand* slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+  LOperand* vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
 
   LStoreKeyedGeneric* result =
       new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
@@ -2297,12 +2288,8 @@
   LOperand* obj =
       UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
   LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
-    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
-  }
+  LOperand* slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+  LOperand* vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
 
   LStoreNamedGeneric* result =
       new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
diff --git a/src/crankshaft/arm/lithium-arm.h b/src/crankshaft/arm/lithium-arm.h
index 2ec992f..4aa4a71 100644
--- a/src/crankshaft/arm/lithium-arm.h
+++ b/src/crankshaft/arm/lithium-arm.h
@@ -53,7 +53,6 @@
   V(ConstantI)                               \
   V(ConstantS)                               \
   V(ConstantT)                               \
-  V(ConstructDouble)                         \
   V(Context)                                 \
   V(DebugBreak)                              \
   V(DeclareGlobals)                          \
@@ -98,6 +97,8 @@
   V(LoadNamedGeneric)                        \
   V(MathAbs)                                 \
   V(MathClz32)                               \
+  V(MathCos)                                 \
+  V(MathSin)                                 \
   V(MathExp)                                 \
   V(MathFloor)                               \
   V(MathFround)                              \
@@ -905,24 +906,29 @@
   DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
 };
 
-
-class LMathExp final : public LTemplateInstruction<1, 1, 3> {
+class LMathCos final : public LTemplateInstruction<1, 1, 0> {
  public:
-  LMathExp(LOperand* value,
-           LOperand* double_temp,
-           LOperand* temp1,
-           LOperand* temp2) {
-    inputs_[0] = value;
-    temps_[0] = temp1;
-    temps_[1] = temp2;
-    temps_[2] = double_temp;
-    ExternalReference::InitializeMathExpData();
-  }
+  explicit LMathCos(LOperand* value) { inputs_[0] = value; }
 
   LOperand* value() { return inputs_[0]; }
-  LOperand* temp1() { return temps_[0]; }
-  LOperand* temp2() { return temps_[1]; }
-  LOperand* double_temp() { return temps_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+class LMathSin final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathSin(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+class LMathExp final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathExp(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
 };
@@ -1567,18 +1573,14 @@
   DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
 };
 
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
  public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
-                     LOperand* vector) {
+  LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
     inputs_[0] = context;
-    inputs_[1] = global_object;
     temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
-  LOperand* global_object() { return inputs_[1]; }
   LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
@@ -2339,20 +2341,6 @@
 };
 
 
-class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
- public:
-  LConstructDouble(LOperand* hi, LOperand* lo) {
-    inputs_[0] = hi;
-    inputs_[1] = lo;
-  }
-
-  LOperand* hi() { return inputs_[0]; }
-  LOperand* lo() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
-};
-
-
 class LAllocate final : public LTemplateInstruction<1, 2, 2> {
  public:
   LAllocate(LOperand* context,
@@ -2545,6 +2533,8 @@
   LInstruction* DoMathFround(HUnaryMathOperation* instr);
   LInstruction* DoMathAbs(HUnaryMathOperation* instr);
   LInstruction* DoMathLog(HUnaryMathOperation* instr);
+  LInstruction* DoMathCos(HUnaryMathOperation* instr);
+  LInstruction* DoMathSin(HUnaryMathOperation* instr);
   LInstruction* DoMathExp(HUnaryMathOperation* instr);
   LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
   LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
diff --git a/src/crankshaft/arm/lithium-codegen-arm.cc b/src/crankshaft/arm/lithium-codegen-arm.cc
index 340642a..e9d3f46 100644
--- a/src/crankshaft/arm/lithium-codegen-arm.cc
+++ b/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -11,7 +11,6 @@
 #include "src/crankshaft/hydrogen-osr.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
 
 namespace v8 {
 namespace internal {
@@ -835,7 +834,7 @@
                                             !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
-    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+    if (FLAG_trace_deopt || isolate()->is_profiling() ||
         jump_table_.is_empty() ||
         !table_entry.IsEquivalentTo(jump_table_.last())) {
       jump_table_.Add(table_entry, zone());
@@ -910,7 +909,6 @@
 void LCodeGen::RecordAndWritePosition(int position) {
   if (position == RelocInfo::kNoPosition) return;
   masm()->positions_recorder()->RecordPosition(position);
-  masm()->positions_recorder()->WriteRecordedPositions();
 }
 
 
@@ -2526,10 +2524,10 @@
   DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
 
   __ ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
-  __ cmp(object_prototype, prototype);
-  EmitTrueBranch(instr, eq);
   __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
   EmitFalseBranch(instr, eq);
+  __ cmp(object_prototype, prototype);
+  EmitTrueBranch(instr, eq);
   __ ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
   __ b(&loop);
 }
@@ -2623,15 +2621,12 @@
 
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->global_object())
-             .is(LoadDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->result()).is(r0));
 
-  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
-                        isolate(), instr->typeof_mode(), PREMONOMORPHIC)
-                        .code();
+  Handle<Code> ic =
+      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
+          .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2726,10 +2721,7 @@
   // Name is always in r2.
   __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
-                        isolate(), NOT_INSIDE_TYPEOF,
-                        instr->hydrogen()->initialization_state())
-                        .code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
 }
 
@@ -3023,13 +3015,9 @@
   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-  }
+  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
 
-  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
-                        isolate(), instr->hydrogen()->initialization_state())
-                        .code();
+  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
 }
 
@@ -3546,26 +3534,32 @@
   }
 }
 
+void LCodeGen::DoMathCos(LMathCos* instr) {
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
+
+void LCodeGen::DoMathSin(LMathSin* instr) {
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
 
 void LCodeGen::DoMathExp(LMathExp* instr) {
-  DwVfpRegister input = ToDoubleRegister(instr->value());
-  DwVfpRegister result = ToDoubleRegister(instr->result());
-  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
-  DwVfpRegister double_scratch2 = double_scratch0();
-  Register temp1 = ToRegister(instr->temp1());
-  Register temp2 = ToRegister(instr->temp2());
-
-  MathExpGenerator::EmitMathExp(
-      masm(), input, result, double_scratch1, double_scratch2,
-      temp1, temp2, scratch0());
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
 }
 
 
 void LCodeGen::DoMathLog(LMathLog* instr) {
   __ PrepareCallCFunction(0, 1, scratch0());
   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
-  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
-                   0, 1);
+  __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
 }
 
@@ -3588,7 +3582,9 @@
 #endif
   if (FLAG_code_comments) {
     if (actual.is_reg()) {
-      Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+      Comment(";;; PrepareForTailCall, actual: %s {",
+              RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+                  actual.reg().code()));
     } else {
       Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
     }
@@ -3715,14 +3711,8 @@
   DCHECK(ToRegister(instr->result()).is(r0));
 
   __ mov(r0, Operand(instr->arity()));
-  if (instr->arity() == 1) {
-    // We only need the allocation site for the case we have a length argument.
-    // The case may bail out to the runtime, which will determine the correct
-    // elements kind with the site.
-    __ Move(r2, instr->hydrogen()->site());
-  } else {
-    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-  }
+  __ Move(r2, instr->hydrogen()->site());
+
   ElementsKind kind = instr->hydrogen()->elements_kind();
   AllocationSiteOverrideMode override_mode =
       (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@@ -3755,7 +3745,7 @@
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
     __ bind(&done);
   } else {
-    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    ArrayNArgumentsConstructorStub stub(isolate());
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
@@ -3876,14 +3866,12 @@
   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-  }
+  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
 
   __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
-  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
-                        isolate(), instr->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+  Handle<Code> ic =
+      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
+          .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
 }
 
@@ -4090,13 +4078,11 @@
   DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-  }
+  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
 
   Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
-                        isolate(), instr->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+                        isolate(), instr->language_mode())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
 }
 
@@ -4232,8 +4218,7 @@
     DCHECK(object_reg.is(r0));
     PushSafepointRegistersScope scope(this);
     __ Move(r1, to_map);
-    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
-    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
     __ CallStub(&stub);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
@@ -5059,15 +5044,6 @@
 }
 
 
-void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
-  Register hi_reg = ToRegister(instr->hi());
-  Register lo_reg = ToRegister(instr->lo());
-  DwVfpRegister result_reg = ToDoubleRegister(instr->result());
-  __ VmovHigh(result_reg, hi_reg);
-  __ VmovLow(result_reg, lo_reg);
-}
-
-
 void LCodeGen::DoAllocate(LAllocate* instr) {
   class DeferredAllocate final : public LDeferredCode {
    public:
diff --git a/src/crankshaft/arm64/lithium-arm64.cc b/src/crankshaft/arm64/lithium-arm64.cc
index 2154398..9a0a7c4 100644
--- a/src/crankshaft/arm64/lithium-arm64.cc
+++ b/src/crankshaft/arm64/lithium-arm64.cc
@@ -1553,15 +1553,9 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* global_object =
-      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
 
-  LLoadGlobalGeneric* result =
-      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+  LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
   return MarkAsCall(DefineFixed(result, x0), instr);
 }
 
@@ -1621,10 +1615,7 @@
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
 
   LInstruction* result =
       DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
@@ -1643,10 +1634,7 @@
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
 
   LInstruction* result =
       DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), x0);
@@ -1931,13 +1919,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
-  LOperand* lo = UseRegisterAndClobber(instr->lo());
-  LOperand* hi = UseRegister(instr->hi());
-  return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
-}
-
-
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
   LOperand* context = info()->IsStub()
       ? UseFixed(instr->context(), cp)
@@ -2246,12 +2227,8 @@
   DCHECK(instr->key()->representation().IsTagged());
   DCHECK(instr->value()->representation().IsTagged());
 
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
-    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
-  }
+  LOperand* slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+  LOperand* vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
 
   LStoreKeyedGeneric* result = new (zone())
       LStoreKeyedGeneric(context, object, key, value, slot, vector);
@@ -2294,12 +2271,8 @@
       UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
   LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
 
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
-    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
-  }
+  LOperand* slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+  LOperand* vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
 
   LStoreNamedGeneric* result =
       new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
@@ -2476,17 +2449,26 @@
         return result;
       }
     }
+    case kMathCos: {
+      DCHECK(instr->representation().IsDouble());
+      DCHECK(instr->value()->representation().IsDouble());
+      LOperand* input = UseFixedDouble(instr->value(), d0);
+      LMathCos* result = new (zone()) LMathCos(input);
+      return MarkAsCall(DefineFixedDouble(result, d0), instr);
+    }
+    case kMathSin: {
+      DCHECK(instr->representation().IsDouble());
+      DCHECK(instr->value()->representation().IsDouble());
+      LOperand* input = UseFixedDouble(instr->value(), d0);
+      LMathSin* result = new (zone()) LMathSin(input);
+      return MarkAsCall(DefineFixedDouble(result, d0), instr);
+    }
     case kMathExp: {
       DCHECK(instr->representation().IsDouble());
       DCHECK(instr->value()->representation().IsDouble());
-      LOperand* input = UseRegister(instr->value());
-      LOperand* double_temp1 = TempDoubleRegister();
-      LOperand* temp1 = TempRegister();
-      LOperand* temp2 = TempRegister();
-      LOperand* temp3 = TempRegister();
-      LMathExp* result = new(zone()) LMathExp(input, double_temp1,
-                                              temp1, temp2, temp3);
-      return DefineAsRegister(result);
+      LOperand* input = UseFixedDouble(instr->value(), d0);
+      LMathExp* result = new (zone()) LMathExp(input);
+      return MarkAsCall(DefineFixedDouble(result, d0), instr);
     }
     case kMathFloor: {
       DCHECK(instr->value()->representation().IsDouble());
diff --git a/src/crankshaft/arm64/lithium-arm64.h b/src/crankshaft/arm64/lithium-arm64.h
index 383e5c3..231008d 100644
--- a/src/crankshaft/arm64/lithium-arm64.h
+++ b/src/crankshaft/arm64/lithium-arm64.h
@@ -57,7 +57,6 @@
   V(ConstantI)                               \
   V(ConstantS)                               \
   V(ConstantT)                               \
-  V(ConstructDouble)                         \
   V(Context)                                 \
   V(DebugBreak)                              \
   V(DeclareGlobals)                          \
@@ -104,6 +103,8 @@
   V(MathAbs)                                 \
   V(MathAbsTagged)                           \
   V(MathClz32)                               \
+  V(MathCos)                                 \
+  V(MathSin)                                 \
   V(MathExp)                                 \
   V(MathFloorD)                              \
   V(MathFloorI)                              \
@@ -993,20 +994,6 @@
 };
 
 
-class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
- public:
-  LConstructDouble(LOperand* hi, LOperand* lo) {
-    inputs_[0] = hi;
-    inputs_[1] = lo;
-  }
-
-  LOperand* hi() { return inputs_[0]; }
-  LOperand* lo() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
-};
-
-
 class LClassOfTestAndBranch final : public LControlInstruction<1, 2> {
  public:
   LClassOfTestAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
@@ -1566,18 +1553,14 @@
   DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
 };
 
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
  public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
-                     LOperand* vector) {
+  LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
     inputs_[0] = context;
-    inputs_[1] = global_object;
     temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
-  LOperand* global_object() { return inputs_[1]; }
   LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
@@ -1763,26 +1746,23 @@
   DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
 };
 
-
-class LMathExp final : public LUnaryMathOperation<4> {
+class LMathCos final : public LUnaryMathOperation<0> {
  public:
-  LMathExp(LOperand* value,
-                LOperand* double_temp1,
-                LOperand* temp1,
-                LOperand* temp2,
-                LOperand* temp3)
-      : LUnaryMathOperation<4>(value) {
-    temps_[0] = double_temp1;
-    temps_[1] = temp1;
-    temps_[2] = temp2;
-    temps_[3] = temp3;
-    ExternalReference::InitializeMathExpData();
-  }
+  explicit LMathCos(LOperand* value) : LUnaryMathOperation<0>(value) {}
 
-  LOperand* double_temp1() { return temps_[0]; }
-  LOperand* temp1() { return temps_[1]; }
-  LOperand* temp2() { return temps_[2]; }
-  LOperand* temp3() { return temps_[3]; }
+  DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+class LMathSin final : public LUnaryMathOperation<0> {
+ public:
+  explicit LMathSin(LOperand* value) : LUnaryMathOperation<0>(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+class LMathExp final : public LUnaryMathOperation<0> {
+ public:
+  explicit LMathExp(LOperand* value) : LUnaryMathOperation<0>(value) {}
 
   DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
 };
diff --git a/src/crankshaft/arm64/lithium-codegen-arm64.cc b/src/crankshaft/arm64/lithium-codegen-arm64.cc
index ebc5277..8df6580 100644
--- a/src/crankshaft/arm64/lithium-codegen-arm64.cc
+++ b/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -12,7 +12,6 @@
 #include "src/crankshaft/hydrogen-osr.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
 
 namespace v8 {
 namespace internal {
@@ -365,15 +364,7 @@
   DCHECK(ToRegister(instr->constructor()).is(x1));
 
   __ Mov(x0, Operand(instr->arity()));
-  if (instr->arity() == 1) {
-    // We only need the allocation site for the case we have a length argument.
-    // The case may bail out to the runtime, which will determine the correct
-    // elements kind with the site.
-    __ Mov(x2, instr->hydrogen()->site());
-  } else {
-    __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
-  }
-
+  __ Mov(x2, instr->hydrogen()->site());
 
   ElementsKind kind = instr->hydrogen()->elements_kind();
   AllocationSiteOverrideMode override_mode =
@@ -406,7 +397,7 @@
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
     __ Bind(&done);
   } else {
-    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    ArrayNArgumentsConstructorStub stub(isolate());
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
   RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
@@ -457,7 +448,6 @@
 void LCodeGen::RecordAndWritePosition(int position) {
   if (position == RelocInfo::kNoPosition) return;
   masm()->positions_recorder()->RecordPosition(position);
-  masm()->positions_recorder()->WriteRecordedPositions();
 }
 
 
@@ -904,7 +894,7 @@
             entry, deopt_info, bailout_type, !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
-    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+    if (FLAG_trace_deopt || isolate()->is_profiling() ||
         jump_table_.is_empty() ||
         !table_entry->IsEquivalentTo(*jump_table_.last())) {
       jump_table_.Add(table_entry, zone());
@@ -2227,18 +2217,6 @@
 }
 
 
-void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
-  Register hi_reg = ToRegister(instr->hi());
-  Register lo_reg = ToRegister(instr->lo());
-  DoubleRegister result_reg = ToDoubleRegister(instr->result());
-
-  // Insert the least significant 32 bits of hi_reg into the most significant
-  // 32 bits of lo_reg, and move to a floating point register.
-  __ Bfi(lo_reg, hi_reg, 32, 32);
-  __ Fmov(result_reg, lo_reg);
-}
-
-
 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
   Handle<String> class_name = instr->hydrogen()->class_name();
   Label* true_label = instr->TrueLabel(chunk_);
@@ -2833,10 +2811,10 @@
   DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
 
   __ Ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
-  __ Cmp(object_prototype, prototype);
-  __ B(eq, instr->TrueLabel(chunk_));
   __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
   __ B(eq, instr->FalseLabel(chunk_));
+  __ Cmp(object_prototype, prototype);
+  __ B(eq, instr->TrueLabel(chunk_));
   __ Ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
   __ B(&loop);
 }
@@ -2865,7 +2843,9 @@
 #endif
   if (FLAG_code_comments) {
     if (actual.is_reg()) {
-      Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+      Comment(";;; PrepareForTailCall, actual: %s {",
+              RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+                  actual.reg().code()));
     } else {
       Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
     }
@@ -3086,14 +3066,12 @@
 
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->global_object())
-             .is(LoadDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->result()).Is(x0));
-  __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+
   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
-                        isolate(), instr->typeof_mode(), PREMONOMORPHIC)
-                        .code();
+  Handle<Code> ic =
+      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
+          .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3351,13 +3329,9 @@
   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-  }
+  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
 
-  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
-                        isolate(), instr->hydrogen()->initialization_state())
-                        .code();
+  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   DCHECK(ToRegister(instr->result()).Is(x0));
@@ -3411,10 +3385,7 @@
   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
   __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
-                        isolate(), NOT_INSIDE_TYPEOF,
-                        instr->hydrogen()->initialization_state())
-                        .code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   DCHECK(ToRegister(instr->result()).is(x0));
@@ -3562,19 +3533,25 @@
   __ Bind(&done);
 }
 
+void LCodeGen::DoMathCos(LMathCos* instr) {
+  DCHECK(instr->IsMarkedAsCall());
+  DCHECK(ToDoubleRegister(instr->value()).is(d0));
+  __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
+  DCHECK(ToDoubleRegister(instr->result()).Is(d0));
+}
+
+void LCodeGen::DoMathSin(LMathSin* instr) {
+  DCHECK(instr->IsMarkedAsCall());
+  DCHECK(ToDoubleRegister(instr->value()).is(d0));
+  __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
+  DCHECK(ToDoubleRegister(instr->result()).Is(d0));
+}
 
 void LCodeGen::DoMathExp(LMathExp* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->value());
-  DoubleRegister result = ToDoubleRegister(instr->result());
-  DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
-  DoubleRegister double_temp2 = double_scratch();
-  Register temp1 = ToRegister(instr->temp1());
-  Register temp2 = ToRegister(instr->temp2());
-  Register temp3 = ToRegister(instr->temp3());
-
-  MathExpGenerator::EmitMathExp(masm(), input, result,
-                                double_temp1, double_temp2,
-                                temp1, temp2, temp3);
+  DCHECK(instr->IsMarkedAsCall());
+  DCHECK(ToDoubleRegister(instr->value()).is(d0));
+  __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
+  DCHECK(ToDoubleRegister(instr->result()).Is(d0));
 }
 
 
@@ -3743,8 +3720,7 @@
 void LCodeGen::DoMathLog(LMathLog* instr) {
   DCHECK(instr->IsMarkedAsCall());
   DCHECK(ToDoubleRegister(instr->value()).is(d0));
-  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
-                   0, 1);
+  __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
   DCHECK(ToDoubleRegister(instr->result()).Is(d0));
 }
 
@@ -4995,13 +4971,11 @@
   DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-  }
+  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
 
   Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
-                        isolate(), instr->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+                        isolate(), instr->language_mode())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -5195,14 +5169,12 @@
   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-  }
+  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
 
   __ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
-  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
-                        isolate(), instr->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+  Handle<Code> ic =
+      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
+          .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -5477,8 +5449,7 @@
     DCHECK(ToRegister(instr->context()).is(cp));
     PushSafepointRegistersScope scope(this);
     __ Mov(x1, Operand(to_map));
-    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
-    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
     __ CallStub(&stub);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
diff --git a/src/crankshaft/arm64/lithium-gap-resolver-arm64.h b/src/crankshaft/arm64/lithium-gap-resolver-arm64.h
index 4f5eb22..acac4e1 100644
--- a/src/crankshaft/arm64/lithium-gap-resolver-arm64.h
+++ b/src/crankshaft/arm64/lithium-gap-resolver-arm64.h
@@ -66,7 +66,8 @@
 
   // Registers used to solve cycles.
   const Register& SavedValueRegister() {
-    DCHECK(!masm_.ScratchRegister().IsAllocatable());
+    DCHECK(!RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(
+        masm_.ScratchRegister().code()));
     return masm_.ScratchRegister();
   }
   // The scratch register is used to break cycles and to store constant.
@@ -77,7 +78,8 @@
     // We use the Crankshaft floating-point scratch register to break a cycle
     // involving double values as the MacroAssembler will not need it for the
     // operations performed by the gap resolver.
-    DCHECK(!crankshaft_fp_scratch.IsAllocatable());
+    DCHECK(!RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(
+        crankshaft_fp_scratch.code()));
     return crankshaft_fp_scratch;
   }
 
diff --git a/src/crankshaft/hydrogen-instructions.cc b/src/crankshaft/hydrogen-instructions.cc
index 5e4ad37..6036d3f 100644
--- a/src/crankshaft/hydrogen-instructions.cc
+++ b/src/crankshaft/hydrogen-instructions.cc
@@ -5,6 +5,7 @@
 #include "src/crankshaft/hydrogen-instructions.h"
 
 #include "src/base/bits.h"
+#include "src/base/ieee754.h"
 #include "src/base/safe_math.h"
 #include "src/crankshaft/hydrogen-infer-representation.h"
 #include "src/double.h"
@@ -784,7 +785,6 @@
     case HValue::kCompareNumericAndBranch:
     case HValue::kCompareObjectEqAndBranch:
     case HValue::kConstant:
-    case HValue::kConstructDouble:
     case HValue::kContext:
     case HValue::kDebugBreak:
     case HValue::kDeclareGlobals:
@@ -1114,10 +1114,14 @@
       return "round";
     case kMathAbs:
       return "abs";
+    case kMathCos:
+      return "cos";
     case kMathLog:
       return "log";
     case kMathExp:
       return "exp";
+    case kMathSin:
+      return "sin";
     case kMathSqrt:
       return "sqrt";
     case kMathPowHalf:
@@ -1553,6 +1557,9 @@
     case IS_JS_ARRAY:
       *first = *last = JS_ARRAY_TYPE;
       return;
+    case IS_JS_FUNCTION:
+      *first = *last = JS_FUNCTION_TYPE;
+      return;
     case IS_JS_DATE:
       *first = *last = JS_DATE_TYPE;
       return;
@@ -1625,6 +1632,8 @@
   switch (check_) {
     case IS_JS_RECEIVER: return "object";
     case IS_JS_ARRAY: return "array";
+    case IS_JS_FUNCTION:
+      return "function";
     case IS_JS_DATE:
       return "date";
     case IS_STRING: return "string";
@@ -2412,9 +2421,9 @@
   if (handle->IsBoolean()) {
     res = handle->BooleanValue() ?
       new(zone) HConstant(1) : new(zone) HConstant(0);
-  } else if (handle->IsUndefined()) {
+  } else if (handle->IsUndefined(isolate)) {
     res = new (zone) HConstant(std::numeric_limits<double>::quiet_NaN());
-  } else if (handle->IsNull()) {
+  } else if (handle->IsNull(isolate)) {
     res = new(zone) HConstant(0);
   } else if (handle->IsString()) {
     res = new(zone) HConstant(String::ToNumber(Handle<String>::cast(handle)));
@@ -3395,6 +3404,9 @@
     }
     if (std::isinf(d)) {  // +Infinity and -Infinity.
       switch (op) {
+        case kMathCos:
+        case kMathSin:
+          return H_CONSTANT_DOUBLE(std::numeric_limits<double>::quiet_NaN());
         case kMathExp:
           return H_CONSTANT_DOUBLE((d > 0.0) ? d : 0.0);
         case kMathLog:
@@ -3416,11 +3428,14 @@
       }
     }
     switch (op) {
+      case kMathCos:
+        return H_CONSTANT_DOUBLE(base::ieee754::cos(d));
       case kMathExp:
-        lazily_initialize_fast_exp(isolate);
-        return H_CONSTANT_DOUBLE(fast_exp(d, isolate));
+        return H_CONSTANT_DOUBLE(base::ieee754::exp(d));
       case kMathLog:
-        return H_CONSTANT_DOUBLE(std::log(d));
+        return H_CONSTANT_DOUBLE(base::ieee754::log(d));
+      case kMathSin:
+        return H_CONSTANT_DOUBLE(base::ieee754::sin(d));
       case kMathSqrt:
         lazily_initialize_fast_sqrt(isolate);
         return H_CONSTANT_DOUBLE(fast_sqrt(d, isolate));
diff --git a/src/crankshaft/hydrogen-instructions.h b/src/crankshaft/hydrogen-instructions.h
index fdb1fd6..9a757c8 100644
--- a/src/crankshaft/hydrogen-instructions.h
+++ b/src/crankshaft/hydrogen-instructions.h
@@ -77,7 +77,6 @@
   V(CompareObjectEqAndBranch)                 \
   V(CompareMap)                               \
   V(Constant)                                 \
-  V(ConstructDouble)                          \
   V(Context)                                  \
   V(DebugBreak)                               \
   V(DeclareGlobals)                           \
@@ -1691,34 +1690,6 @@
 };
 
 
-class HConstructDouble final : public HTemplateInstruction<2> {
- public:
-  DECLARE_INSTRUCTION_FACTORY_P2(HConstructDouble, HValue*, HValue*);
-
-  Representation RequiredInputRepresentation(int index) override {
-    return Representation::Integer32();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble)
-
-  HValue* hi() { return OperandAt(0); }
-  HValue* lo() { return OperandAt(1); }
-
- protected:
-  bool DataEquals(HValue* other) override { return true; }
-
- private:
-  explicit HConstructDouble(HValue* hi, HValue* lo) {
-    set_representation(Representation::Double());
-    SetFlag(kUseGVN);
-    SetOperandAt(0, hi);
-    SetOperandAt(1, lo);
-  }
-
-  bool IsDeletable() const override { return true; }
-};
-
-
 enum RemovableSimulate {
   REMOVABLE_SIMULATE,
   FIXED_SIMULATE
@@ -2161,8 +2132,6 @@
     this->SetAllSideEffects();
   }
 
-  HType CalculateInferredType() final { return HType::Tagged(); }
-
   virtual int argument_count() const {
     return argument_count_;
   }
@@ -2240,8 +2209,6 @@
 
   DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor)
 
-  HType CalculateInferredType() final { return HType::Tagged(); }
-
   // Defines whether this instruction corresponds to a JS call at tail position.
   TailCallMode syntactic_tail_call_mode() const {
     return SyntacticTailCallModeField::decode(bit_field_);
@@ -2457,9 +2424,11 @@
       return Representation::Tagged();
     } else {
       switch (op_) {
+        case kMathCos:
         case kMathFloor:
         case kMathRound:
         case kMathFround:
+        case kMathSin:
         case kMathSqrt:
         case kMathPowHalf:
         case kMathLog:
@@ -2528,9 +2497,11 @@
         // is tagged, and not when it is an unboxed double or unboxed integer.
         SetChangesFlag(kNewSpacePromotion);
         break;
+      case kMathCos:
       case kMathFround:
       case kMathLog:
       case kMathExp:
+      case kMathSin:
       case kMathSqrt:
       case kMathPowHalf:
         set_representation(Representation::Double());
@@ -2790,6 +2761,7 @@
   enum Check {
     IS_JS_RECEIVER,
     IS_JS_ARRAY,
+    IS_JS_FUNCTION,
     IS_JS_DATE,
     IS_STRING,
     IS_INTERNALIZED_STRING,
@@ -2808,6 +2780,8 @@
     switch (check_) {
       case IS_JS_RECEIVER: return HType::JSReceiver();
       case IS_JS_ARRAY: return HType::JSArray();
+      case IS_JS_FUNCTION:
+        return HType::JSObject();
       case IS_JS_DATE: return HType::JSObject();
       case IS_STRING: return HType::String();
       case IS_INTERNALIZED_STRING: return HType::String();
@@ -3766,6 +3740,7 @@
       : HBinaryOperation(context, left, right, type) {
     SetFlag(kFlexibleRepresentation);
     SetFlag(kTruncatingToInt32);
+    SetFlag(kAllowUndefinedAsNaN);
     SetAllSideEffects();
   }
 
@@ -4374,6 +4349,11 @@
       SetChangesFlag(kNewSpacePromotion);
       ClearFlag(kAllowUndefinedAsNaN);
     }
+    if (!right()->type().IsTaggedNumber() &&
+        !right()->representation().IsDouble() &&
+        !right()->representation().IsSmiOrInteger32()) {
+      ClearFlag(kAllowUndefinedAsNaN);
+    }
   }
 
   Representation RepresentationFromInputs() override;
@@ -4872,26 +4852,20 @@
   HPhi* incoming_value_;
 };
 
-
-class HLoadGlobalGeneric final : public HTemplateInstruction<2> {
+class HLoadGlobalGeneric final : public HTemplateInstruction<1> {
  public:
-  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadGlobalGeneric, HValue*,
-                                              Handle<String>, TypeofMode);
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HLoadGlobalGeneric,
+                                              Handle<String>, TypeofMode,
+                                              Handle<TypeFeedbackVector>,
+                                              FeedbackVectorSlot);
 
   HValue* context() { return OperandAt(0); }
-  HValue* global_object() { return OperandAt(1); }
   Handle<String> name() const { return name_; }
   TypeofMode typeof_mode() const { return typeof_mode_; }
   FeedbackVectorSlot slot() const { return slot_; }
   Handle<TypeFeedbackVector> feedback_vector() const {
     return feedback_vector_;
   }
-  bool HasVectorAndSlot() const { return true; }
-  void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
-                        FeedbackVectorSlot slot) {
-    feedback_vector_ = vector;
-    slot_ = slot;
-  }
 
   std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
 
@@ -4902,11 +4876,14 @@
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric)
 
  private:
-  HLoadGlobalGeneric(HValue* context, HValue* global_object,
-                     Handle<String> name, TypeofMode typeof_mode)
-      : name_(name), typeof_mode_(typeof_mode) {
+  HLoadGlobalGeneric(HValue* context, Handle<String> name,
+                     TypeofMode typeof_mode, Handle<TypeFeedbackVector> vector,
+                     FeedbackVectorSlot slot)
+      : name_(name),
+        typeof_mode_(typeof_mode),
+        feedback_vector_(vector),
+        slot_(slot) {
     SetOperandAt(0, context);
-    SetOperandAt(1, global_object);
     set_representation(Representation::Tagged());
     SetAllSideEffects();
   }
@@ -5148,8 +5125,11 @@
     HAllocate* allocate = HAllocate::cast(object);
     if (allocate->IsAllocationFolded()) {
       HValue* dominator = allocate->allocation_folding_dominator();
-      DCHECK(HAllocate::cast(dominator)->IsAllocationFoldingDominator());
-      object = dominator;
+      // There is no guarantee that all allocations are folded together because
+      // GVN performs a fixpoint.
+      if (HAllocate::cast(dominator)->IsAllocationFoldingDominator()) {
+        object = dominator;
+      }
     }
   }
 
@@ -5566,6 +5546,19 @@
                          Handle<Name>::null(), false, false);
   }
 
+  static HObjectAccess ForBoundTargetFunction() {
+    return HObjectAccess(kInobject,
+                         JSBoundFunction::kBoundTargetFunctionOffset);
+  }
+
+  static HObjectAccess ForBoundThis() {
+    return HObjectAccess(kInobject, JSBoundFunction::kBoundThisOffset);
+  }
+
+  static HObjectAccess ForBoundArguments() {
+    return HObjectAccess(kInobject, JSBoundFunction::kBoundArgumentsOffset);
+  }
+
   // Create an access to an offset in a fixed array header.
   static HObjectAccess ForFixedArrayHeader(int offset);
 
@@ -5887,26 +5880,19 @@
 
 class HLoadNamedGeneric final : public HTemplateInstruction<2> {
  public:
-  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadNamedGeneric, HValue*,
-                                              Handle<Name>, InlineCacheState);
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HLoadNamedGeneric, HValue*,
+                                              Handle<Name>,
+                                              Handle<TypeFeedbackVector>,
+                                              FeedbackVectorSlot);
 
   HValue* context() const { return OperandAt(0); }
   HValue* object() const { return OperandAt(1); }
   Handle<Name> name() const { return name_; }
 
-  InlineCacheState initialization_state() const {
-    return initialization_state_;
-  }
   FeedbackVectorSlot slot() const { return slot_; }
   Handle<TypeFeedbackVector> feedback_vector() const {
     return feedback_vector_;
   }
-  bool HasVectorAndSlot() const { return true; }
-  void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
-                        FeedbackVectorSlot slot) {
-    feedback_vector_ = vector;
-    slot_ = slot;
-  }
 
   Representation RequiredInputRepresentation(int index) override {
     return Representation::Tagged();
@@ -5918,9 +5904,8 @@
 
  private:
   HLoadNamedGeneric(HValue* context, HValue* object, Handle<Name> name,
-                    InlineCacheState initialization_state)
-      : name_(name),
-        initialization_state_(initialization_state) {
+                    Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
+      : name_(name), feedback_vector_(vector), slot_(slot) {
     SetOperandAt(0, context);
     SetOperandAt(1, object);
     set_representation(Representation::Tagged());
@@ -5930,7 +5915,6 @@
   Handle<Name> name_;
   Handle<TypeFeedbackVector> feedback_vector_;
   FeedbackVectorSlot slot_;
-  InlineCacheState initialization_state_;
 };
 
 
@@ -6172,27 +6156,17 @@
 
 class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
  public:
-  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadKeyedGeneric, HValue*,
-                                              HValue*, InlineCacheState);
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HLoadKeyedGeneric, HValue*,
+                                              HValue*,
+                                              Handle<TypeFeedbackVector>,
+                                              FeedbackVectorSlot);
   HValue* object() const { return OperandAt(0); }
   HValue* key() const { return OperandAt(1); }
   HValue* context() const { return OperandAt(2); }
-  InlineCacheState initialization_state() const {
-    return initialization_state_;
-  }
   FeedbackVectorSlot slot() const { return slot_; }
   Handle<TypeFeedbackVector> feedback_vector() const {
     return feedback_vector_;
   }
-  bool HasVectorAndSlot() const {
-    DCHECK(initialization_state_ == MEGAMORPHIC || !feedback_vector_.is_null());
-    return !feedback_vector_.is_null();
-  }
-  void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
-                        FeedbackVectorSlot slot) {
-    feedback_vector_ = vector;
-    slot_ = slot;
-  }
 
   std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
 
@@ -6207,8 +6181,8 @@
 
  private:
   HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key,
-                    InlineCacheState initialization_state)
-      : initialization_state_(initialization_state) {
+                    Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
+      : feedback_vector_(vector), slot_(slot) {
     set_representation(Representation::Tagged());
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
@@ -6218,7 +6192,6 @@
 
   Handle<TypeFeedbackVector> feedback_vector_;
   FeedbackVectorSlot slot_;
-  InlineCacheState initialization_state_;
 };
 
 
@@ -6380,20 +6353,18 @@
   uint32_t bit_field_;
 };
 
-
 class HStoreNamedGeneric final : public HTemplateInstruction<3> {
  public:
-  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(HStoreNamedGeneric, HValue*,
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P6(HStoreNamedGeneric, HValue*,
                                               Handle<Name>, HValue*,
-                                              LanguageMode, InlineCacheState);
+                                              LanguageMode,
+                                              Handle<TypeFeedbackVector>,
+                                              FeedbackVectorSlot);
   HValue* object() const { return OperandAt(0); }
   HValue* value() const { return OperandAt(1); }
   HValue* context() const { return OperandAt(2); }
   Handle<Name> name() const { return name_; }
   LanguageMode language_mode() const { return language_mode_; }
-  InlineCacheState initialization_state() const {
-    return initialization_state_;
-  }
 
   std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
 
@@ -6405,22 +6376,17 @@
   Handle<TypeFeedbackVector> feedback_vector() const {
     return feedback_vector_;
   }
-  bool HasVectorAndSlot() const { return true; }
-  void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
-                        FeedbackVectorSlot slot) {
-    feedback_vector_ = vector;
-    slot_ = slot;
-  }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric)
 
  private:
   HStoreNamedGeneric(HValue* context, HValue* object, Handle<Name> name,
                      HValue* value, LanguageMode language_mode,
-                     InlineCacheState initialization_state)
+                     Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
       : name_(name),
-        language_mode_(language_mode),
-        initialization_state_(initialization_state) {
+        feedback_vector_(vector),
+        slot_(slot),
+        language_mode_(language_mode) {
     SetOperandAt(0, object);
     SetOperandAt(1, value);
     SetOperandAt(2, context);
@@ -6431,10 +6397,8 @@
   Handle<TypeFeedbackVector> feedback_vector_;
   FeedbackVectorSlot slot_;
   LanguageMode language_mode_;
-  InlineCacheState initialization_state_;
 };
 
-
 class HStoreKeyed final : public HTemplateInstruction<4>,
                           public ArrayInstructionInterface {
  public:
@@ -6617,21 +6581,18 @@
   HValue* dominator_;
 };
 
-
 class HStoreKeyedGeneric final : public HTemplateInstruction<4> {
  public:
-  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(HStoreKeyedGeneric, HValue*,
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P6(HStoreKeyedGeneric, HValue*,
                                               HValue*, HValue*, LanguageMode,
-                                              InlineCacheState);
+                                              Handle<TypeFeedbackVector>,
+                                              FeedbackVectorSlot);
 
   HValue* object() const { return OperandAt(0); }
   HValue* key() const { return OperandAt(1); }
   HValue* value() const { return OperandAt(2); }
   HValue* context() const { return OperandAt(3); }
   LanguageMode language_mode() const { return language_mode_; }
-  InlineCacheState initialization_state() const {
-    return initialization_state_;
-  }
 
   Representation RequiredInputRepresentation(int index) override {
     // tagged[tagged] = tagged
@@ -6642,14 +6603,6 @@
   Handle<TypeFeedbackVector> feedback_vector() const {
     return feedback_vector_;
   }
-  bool HasVectorAndSlot() const {
-    return !feedback_vector_.is_null();
-  }
-  void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
-                        FeedbackVectorSlot slot) {
-    feedback_vector_ = vector;
-    slot_ = slot;
-  }
 
   std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
 
@@ -6658,9 +6611,8 @@
  private:
   HStoreKeyedGeneric(HValue* context, HValue* object, HValue* key,
                      HValue* value, LanguageMode language_mode,
-                     InlineCacheState initialization_state)
-      : language_mode_(language_mode),
-        initialization_state_(initialization_state) {
+                     Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
+      : feedback_vector_(vector), slot_(slot), language_mode_(language_mode) {
     SetOperandAt(0, object);
     SetOperandAt(1, key);
     SetOperandAt(2, value);
@@ -6671,10 +6623,8 @@
   Handle<TypeFeedbackVector> feedback_vector_;
   FeedbackVectorSlot slot_;
   LanguageMode language_mode_;
-  InlineCacheState initialization_state_;
 };
 
-
 class HTransitionElementsKind final : public HTemplateInstruction<2> {
  public:
   inline static HTransitionElementsKind* New(Isolate* isolate, Zone* zone,
diff --git a/src/crankshaft/hydrogen-types.cc b/src/crankshaft/hydrogen-types.cc
index 4266e28..20d50d8 100644
--- a/src/crankshaft/hydrogen-types.cc
+++ b/src/crankshaft/hydrogen-types.cc
@@ -34,23 +34,25 @@
 
 // static
 HType HType::FromValue(Handle<Object> value) {
-  if (value->IsSmi()) return HType::Smi();
-  if (value->IsNull()) return HType::Null();
-  if (value->IsHeapNumber()) {
+  Object* raw_value = *value;
+  if (raw_value->IsSmi()) return HType::Smi();
+  DCHECK(raw_value->IsHeapObject());
+  Isolate* isolate = HeapObject::cast(*value)->GetIsolate();
+  if (raw_value->IsNull(isolate)) return HType::Null();
+  if (raw_value->IsHeapNumber()) {
     double n = Handle<v8::internal::HeapNumber>::cast(value)->value();
     return IsSmiDouble(n) ? HType::Smi() : HType::HeapNumber();
   }
-  if (value->IsString()) return HType::String();
-  if (value->IsBoolean()) return HType::Boolean();
-  if (value->IsUndefined()) return HType::Undefined();
-  if (value->IsJSArray()) {
-    DCHECK(!value->IsUndetectable());
+  if (raw_value->IsString()) return HType::String();
+  if (raw_value->IsBoolean()) return HType::Boolean();
+  if (raw_value->IsUndefined(isolate)) return HType::Undefined();
+  if (raw_value->IsJSArray()) {
+    DCHECK(!raw_value->IsUndetectable());
     return HType::JSArray();
   }
-  if (value->IsJSObject() && !value->IsUndetectable()) {
+  if (raw_value->IsJSObject() && !raw_value->IsUndetectable()) {
     return HType::JSObject();
   }
-  DCHECK(value->IsHeapObject());
   return HType::HeapObject();
 }
 
diff --git a/src/crankshaft/hydrogen.cc b/src/crankshaft/hydrogen.cc
index 9c5fa15..1a6f863 100644
--- a/src/crankshaft/hydrogen.cc
+++ b/src/crankshaft/hydrogen.cc
@@ -68,6 +68,8 @@
 namespace v8 {
 namespace internal {
 
+const auto GetRegConfig = RegisterConfiguration::Crankshaft;
+
 class HOptimizedGraphBuilderWithPositions : public HOptimizedGraphBuilder {
  public:
   explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
@@ -1375,10 +1377,11 @@
 
   int inline_id = static_cast<int>(graph()->inlined_function_infos().size());
   HInlinedFunctionInfo info(shared->start_position());
-  if (!shared->script()->IsUndefined()) {
-    Handle<Script> script(Script::cast(shared->script()));
+  if (!shared->script()->IsUndefined(isolate())) {
+    Handle<Script> script(Script::cast(shared->script()), isolate());
 
-    if (FLAG_hydrogen_track_positions && !script->source()->IsUndefined()) {
+    if (FLAG_hydrogen_track_positions &&
+        !script->source()->IsUndefined(isolate())) {
       CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
       Object* source_name = script->name();
       OFStream os(tracing_scope.file());
@@ -5669,10 +5672,10 @@
   // We also have a stack overflow if the recursive compilation did.
   if (HasStackOverflow()) return;
   // Use the fast case closure allocation code that allocates in new
-  // space for nested functions that don't need literals cloning.
+  // space for nested functions that don't need pretenuring.
   HConstant* shared_info_value = Add<HConstant>(shared_info);
   HInstruction* instr;
-  if (!expr->pretenure() && shared_info->num_literals() == 0) {
+  if (!expr->pretenure()) {
     FastNewClosureStub stub(isolate(), shared_info->language_mode(),
                             shared_info->kind());
     FastNewClosureDescriptor descriptor(isolate());
@@ -5832,7 +5835,7 @@
 
           // If the values is not the hole, it will stay initialized,
           // so no need to generate a check.
-          if (*current_value == *isolate()->factory()->the_hole_value()) {
+          if (current_value->IsTheHole(isolate())) {
             return Bailout(kReferenceToUninitializedVariable);
           }
           HInstruction* result = New<HLoadNamedField>(
@@ -5895,13 +5898,10 @@
           return ast_context()->ReturnInstruction(instr, expr->id());
         }
       } else {
-        HValue* global_object = Add<HLoadNamedField>(
-            BuildGetNativeContext(), nullptr,
-            HObjectAccess::ForContextSlot(Context::EXTENSION_INDEX));
+        Handle<TypeFeedbackVector> vector(current_feedback_vector(), isolate());
         HLoadGlobalGeneric* instr = New<HLoadGlobalGeneric>(
-            global_object, variable->name(), ast_context()->typeof_mode());
-        instr->SetVectorAndSlot(handle(current_feedback_vector(), isolate()),
-                                expr->VariableFeedbackSlot());
+            variable->name(), ast_context()->typeof_mode(), vector,
+            expr->VariableFeedbackSlot());
         return ast_context()->ReturnInstruction(instr, expr->id());
       }
     }
@@ -6055,7 +6055,7 @@
       closure->literals()->literal(expr->literal_index()), isolate());
   Handle<AllocationSite> site;
   Handle<JSObject> boilerplate;
-  if (!literals_cell->IsUndefined()) {
+  if (!literals_cell->IsUndefined(isolate())) {
     // Retrieve the boilerplate
     site = Handle<AllocationSite>::cast(literals_cell);
     boilerplate = Handle<JSObject>(JSObject::cast(site->transition_info()),
@@ -6173,7 +6173,7 @@
   Handle<Object> literals_cell(literals->literal(expr->literal_index()),
                                isolate());
   Handle<JSObject> boilerplate_object;
-  if (!literals_cell->IsUndefined()) {
+  if (!literals_cell->IsUndefined(isolate())) {
     DCHECK(literals_cell->IsAllocationSite());
     site = Handle<AllocationSite>::cast(literals_cell);
     boilerplate_object = Handle<JSObject>(
@@ -6562,7 +6562,6 @@
 bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessMonomorphic() {
   if (!CanInlinePropertyAccess(map_)) return false;
   if (IsJSObjectFieldAccessor()) return IsLoad();
-  if (IsJSArrayBufferViewFieldAccessor()) return IsLoad();
   if (map_->IsJSFunctionMap() && map_->is_constructor() &&
       !map_->has_non_instance_prototype() &&
       name_.is_identical_to(isolate()->factory()->prototype_string())) {
@@ -6610,17 +6609,6 @@
     }
     return true;
   }
-  if (GetJSArrayBufferViewFieldAccess(&access)) {
-    for (int i = 1; i < maps->length(); ++i) {
-      PropertyAccessInfo test_info(builder_, access_type_, maps->at(i), name_);
-      HObjectAccess test_access = HObjectAccess::ForMap();  // bogus default
-      if (!test_info.GetJSArrayBufferViewFieldAccess(&test_access)) {
-        return false;
-      }
-      if (!access.Equals(test_access)) return false;
-    }
-    return true;
-  }
 
   // Currently only handle numbers as a polymorphic case.
   // TODO(verwaest): Support monomorphic handling of numbers with a HCheckNumber
@@ -6674,12 +6662,6 @@
     return New<HLoadNamedField>(object, checked_object, access);
   }
 
-  if (info->GetJSArrayBufferViewFieldAccess(&access)) {
-    DCHECK(info->IsLoad());
-    checked_object = Add<HCheckArrayBufferNotNeutered>(checked_object);
-    return New<HLoadNamedField>(object, checked_object, access);
-  }
-
   if (info->name().is_identical_to(isolate()->factory()->prototype_string()) &&
       info->map()->IsJSFunctionMap() && info->map()->is_constructor()) {
     DCHECK(!info->map()->has_non_instance_prototype());
@@ -7032,7 +7014,7 @@
 
       // If the values is not the hole, it will stay initialized,
       // so no need to generate a check.
-      if (*current_value == *isolate()->factory()->the_hole_value()) {
+      if (current_value->IsTheHole(isolate())) {
         return Bailout(kReferenceToUninitializedVariable);
       }
 
@@ -7104,12 +7086,11 @@
     HValue* global_object = Add<HLoadNamedField>(
         BuildGetNativeContext(), nullptr,
         HObjectAccess::ForContextSlot(Context::EXTENSION_INDEX));
-    HStoreNamedGeneric* instr =
-        Add<HStoreNamedGeneric>(global_object, var->name(), value,
-                                function_language_mode(), PREMONOMORPHIC);
     Handle<TypeFeedbackVector> vector =
         handle(current_feedback_vector(), isolate());
-    instr->SetVectorAndSlot(vector, slot);
+    HStoreNamedGeneric* instr =
+        Add<HStoreNamedGeneric>(global_object, var->name(), value,
+                                function_language_mode(), vector, slot);
     USE(instr);
     DCHECK(instr->HasObservableSideEffects());
     Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -7435,16 +7416,17 @@
       // it has to share information with full code.
       HConstant* key = Add<HConstant>(name);
       HLoadKeyedGeneric* result =
-          New<HLoadKeyedGeneric>(object, key, PREMONOMORPHIC);
-      result->SetVectorAndSlot(vector, slot);
+          New<HLoadKeyedGeneric>(object, key, vector, slot);
       return result;
     }
 
     HLoadNamedGeneric* result =
-        New<HLoadNamedGeneric>(object, name, PREMONOMORPHIC);
-    result->SetVectorAndSlot(vector, slot);
+        New<HLoadNamedGeneric>(object, name, vector, slot);
     return result;
   } else {
+    Handle<TypeFeedbackVector> vector =
+        handle(current_feedback_vector(), isolate());
+
     if (current_feedback_vector()->GetKind(slot) ==
         FeedbackVectorSlotKind::KEYED_STORE_IC) {
       // It's possible that a keyed store of a constant string was converted
@@ -7453,18 +7435,12 @@
       // it has to share information with full code.
       HConstant* key = Add<HConstant>(name);
       HStoreKeyedGeneric* result = New<HStoreKeyedGeneric>(
-          object, key, value, function_language_mode(), PREMONOMORPHIC);
-      Handle<TypeFeedbackVector> vector =
-          handle(current_feedback_vector(), isolate());
-      result->SetVectorAndSlot(vector, slot);
+          object, key, value, function_language_mode(), vector, slot);
       return result;
     }
 
     HStoreNamedGeneric* result = New<HStoreNamedGeneric>(
-        object, name, value, function_language_mode(), PREMONOMORPHIC);
-    Handle<TypeFeedbackVector> vector =
-        handle(current_feedback_vector(), isolate());
-    result->SetVectorAndSlot(vector, slot);
+        object, name, value, function_language_mode(), vector, slot);
     return result;
   }
 }
@@ -7473,25 +7449,15 @@
 HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
     PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
     HValue* object, HValue* key, HValue* value) {
+  Handle<TypeFeedbackVector> vector =
+      handle(current_feedback_vector(), isolate());
   if (access_type == LOAD) {
-    InlineCacheState initial_state = expr->AsProperty()->GetInlineCacheState();
     HLoadKeyedGeneric* result =
-        New<HLoadKeyedGeneric>(object, key, initial_state);
-    // HLoadKeyedGeneric with vector ics benefits from being encoded as
-    // MEGAMORPHIC because the vector/slot combo becomes unnecessary.
-    if (initial_state != MEGAMORPHIC) {
-      // We need to pass vector information.
-      Handle<TypeFeedbackVector> vector =
-          handle(current_feedback_vector(), isolate());
-      result->SetVectorAndSlot(vector, slot);
-    }
+        New<HLoadKeyedGeneric>(object, key, vector, slot);
     return result;
   } else {
     HStoreKeyedGeneric* result = New<HStoreKeyedGeneric>(
-        object, key, value, function_language_mode(), PREMONOMORPHIC);
-    Handle<TypeFeedbackVector> vector =
-        handle(current_feedback_vector(), isolate());
-    result->SetVectorAndSlot(vector, slot);
+        object, key, value, function_language_mode(), vector, slot);
     return result;
   }
 }
@@ -7972,6 +7938,11 @@
       return false;
     }
 
+    // Make sure we visit the arguments object so that the liveness analysis
+    // still records the access.
+    CHECK_ALIVE_OR_RETURN(VisitForValue(expr->obj(), ARGUMENTS_ALLOWED), true);
+    Drop(1);
+
     if (function_state()->outer() == NULL) {
       HInstruction* elements = Add<HArgumentsElements>(false);
       result = New<HArgumentsLength>(elements);
@@ -8125,8 +8096,7 @@
 
 HInstruction* HGraphBuilder::BuildCheckPrototypeMaps(Handle<JSObject> prototype,
                                                      Handle<JSObject> holder) {
-  PrototypeIterator iter(isolate(), prototype,
-                         PrototypeIterator::START_AT_RECEIVER);
+  PrototypeIterator iter(isolate(), prototype, kStartAtReceiver);
   while (holder.is_null() ||
          !PrototypeIterator::GetCurrent(iter).is_identical_to(holder)) {
     BuildConstantMapCheck(PrototypeIterator::GetCurrent<JSObject>(iter));
@@ -8650,6 +8620,7 @@
     TraceInline(target, caller, "could not generate deoptimization info");
     return false;
   }
+
   // Remember that we inlined this function. This needs to be called right
   // after the EnsureDeoptimizationSupport call so that the code flusher
   // does not remove the code with the deoptimization support.
@@ -8659,6 +8630,9 @@
   // After this point, we've made a decision to inline this function (so
   // TryInline should always return true).
 
+  // If target was lazily compiled, its literals array may not yet be set up.
+  JSFunction::EnsureLiterals(target);
+
   // Type-check the inlined function.
   DCHECK(target_shared->has_deoptimization_support());
   AstTyper(target_info.isolate(), target_info.zone(), target_info.closure(),
@@ -8844,9 +8818,13 @@
                                              BailoutId ast_id,
                                              BailoutId return_id) {
   if (TryInlineApiGetter(getter, receiver_map, ast_id)) return true;
-  return getter->IsJSFunction() &&
-         TryInline(Handle<JSFunction>::cast(getter), 0, NULL, ast_id, return_id,
-                   GETTER_CALL_RETURN, TailCallMode::kDisallow);
+  if (getter->IsJSFunction()) {
+    Handle<JSFunction> getter_function = Handle<JSFunction>::cast(getter);
+    return TryInlineBuiltinGetterCall(getter_function, receiver_map, ast_id) ||
+           TryInline(getter_function, 0, NULL, ast_id, return_id,
+                     GETTER_CALL_RETURN, TailCallMode::kDisallow);
+  }
+  return false;
 }
 
 bool HOptimizedGraphBuilder::TryInlineSetter(Handle<Object> setter,
@@ -8876,13 +8854,13 @@
   // We intentionally ignore expr->tail_call_mode() here because builtins
   // we inline here do not observe if they were tail called or not.
   switch (id) {
+    case kMathCos:
     case kMathExp:
-      if (!FLAG_fast_math) break;
-      // Fall through if FLAG_fast_math.
     case kMathRound:
     case kMathFround:
     case kMathFloor:
     case kMathAbs:
+    case kMathSin:
     case kMathSqrt:
     case kMathLog:
     case kMathClz32:
@@ -8938,9 +8916,62 @@
          !IsReadOnlyLengthDescriptor(receiver_map);
 }
 
+bool HOptimizedGraphBuilder::TryInlineBuiltinGetterCall(
+    Handle<JSFunction> function, Handle<Map> receiver_map, BailoutId ast_id) {
+  if (!function->shared()->HasBuiltinFunctionId()) return false;
+  BuiltinFunctionId id = function->shared()->builtin_function_id();
+
+  // Try to inline getter calls like DataView.prototype.byteLength/byteOffset
+  // as operations in the calling function.
+  switch (id) {
+    case kDataViewBuffer: {
+      if (!receiver_map->IsJSDataViewMap()) return false;
+      HObjectAccess access = HObjectAccess::ForMapAndOffset(
+          receiver_map, JSDataView::kBufferOffset);
+      HValue* object = Pop();  // receiver
+      HInstruction* result = New<HLoadNamedField>(object, object, access);
+      ast_context()->ReturnInstruction(result, ast_id);
+      return true;
+    }
+    case kDataViewByteLength:
+    case kDataViewByteOffset: {
+      if (!receiver_map->IsJSDataViewMap()) return false;
+      int offset = (id == kDataViewByteLength) ? JSDataView::kByteLengthOffset
+                                               : JSDataView::kByteOffsetOffset;
+      HObjectAccess access =
+          HObjectAccess::ForMapAndOffset(receiver_map, offset);
+      HValue* object = Pop();  // receiver
+      HValue* checked_object = Add<HCheckArrayBufferNotNeutered>(object);
+      HInstruction* result =
+          New<HLoadNamedField>(object, checked_object, access);
+      ast_context()->ReturnInstruction(result, ast_id);
+      return true;
+    }
+    case kTypedArrayByteLength:
+    case kTypedArrayByteOffset:
+    case kTypedArrayLength: {
+      if (!receiver_map->IsJSTypedArrayMap()) return false;
+      int offset = (id == kTypedArrayLength)
+                       ? JSTypedArray::kLengthOffset
+                       : (id == kTypedArrayByteLength)
+                             ? JSTypedArray::kByteLengthOffset
+                             : JSTypedArray::kByteOffsetOffset;
+      HObjectAccess access =
+          HObjectAccess::ForMapAndOffset(receiver_map, offset);
+      HValue* object = Pop();  // receiver
+      HValue* checked_object = Add<HCheckArrayBufferNotNeutered>(object);
+      HInstruction* result =
+          New<HLoadNamedField>(object, checked_object, access);
+      ast_context()->ReturnInstruction(result, ast_id);
+      return true;
+    }
+    default:
+      return false;
+  }
+}
 
 bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
-    Call* expr, Handle<JSFunction> function, Handle<Map> receiver_map,
+    Handle<JSFunction> function, Handle<Map> receiver_map, BailoutId ast_id,
     int args_count_no_receiver) {
   if (!function->shared()->HasBuiltinFunctionId()) return false;
   BuiltinFunctionId id = function->shared()->builtin_function_id();
@@ -8985,12 +9016,12 @@
         HInstruction* char_code =
             BuildStringCharCodeAt(string, index);
         if (id == kStringCharCodeAt) {
-          ast_context()->ReturnInstruction(char_code, expr->id());
+          ast_context()->ReturnInstruction(char_code, ast_id);
           return true;
         }
         AddInstruction(char_code);
         HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
-        ast_context()->ReturnInstruction(result, expr->id());
+        ast_context()->ReturnInstruction(result, ast_id);
         return true;
       }
       break;
@@ -9002,17 +9033,17 @@
             argument, Representation::Integer32());
         argument->SetFlag(HValue::kTruncatingToInt32);
         HInstruction* result = NewUncasted<HStringCharFromCode>(argument);
-        ast_context()->ReturnInstruction(result, expr->id());
+        ast_context()->ReturnInstruction(result, ast_id);
         return true;
       }
       break;
+    case kMathCos:
     case kMathExp:
-      if (!FLAG_fast_math) break;
-      // Fall through if FLAG_fast_math.
     case kMathRound:
     case kMathFround:
     case kMathFloor:
     case kMathAbs:
+    case kMathSin:
     case kMathSqrt:
     case kMathLog:
     case kMathClz32:
@@ -9020,7 +9051,7 @@
         HValue* argument = Pop();
         Drop(2);  // Receiver and function.
         HInstruction* op = NewUncasted<HUnaryMathOperation>(argument, id);
-        ast_context()->ReturnInstruction(op, expr->id());
+        ast_context()->ReturnInstruction(op, ast_id);
         return true;
       }
       break;
@@ -9051,7 +9082,7 @@
         if (result == NULL) {
           result = NewUncasted<HPower>(left, right);
         }
-        ast_context()->ReturnInstruction(result, expr->id());
+        ast_context()->ReturnInstruction(result, ast_id);
         return true;
       }
       break;
@@ -9064,7 +9095,7 @@
         HMathMinMax::Operation op = (id == kMathMin) ? HMathMinMax::kMathMin
                                                      : HMathMinMax::kMathMax;
         HInstruction* result = NewUncasted<HMathMinMax>(left, right, op);
-        ast_context()->ReturnInstruction(result, expr->id());
+        ast_context()->ReturnInstruction(result, ast_id);
         return true;
       }
       break;
@@ -9075,7 +9106,7 @@
         Drop(2);  // Receiver and function.
         HInstruction* result =
             HMul::NewImul(isolate(), zone(), context(), left, right);
-        ast_context()->ReturnInstruction(result, expr->id());
+        ast_context()->ReturnInstruction(result, ast_id);
         return true;
       }
       break;
@@ -9131,7 +9162,7 @@
         length_checker.End();
       }
       result = ast_context()->IsEffect() ? graph()->GetConstant0() : Top();
-      Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+      Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
       if (!ast_context()->IsEffect()) Drop(1);
 
       ast_context()->ReturnValue(result);
@@ -9184,7 +9215,7 @@
             STORE, NEVER_RETURN_HOLE, STORE_AND_GROW_NO_TRANSITION);
 
         if (!ast_context()->IsEffect()) Push(new_size);
-        Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+        Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
         if (!ast_context()->IsEffect()) Drop(1);
       }
 
@@ -9298,7 +9329,7 @@
         if_lengthiszero.End();
       }
       result = ast_context()->IsEffect() ? graph()->GetConstant0() : Top();
-      Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+      Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
       if (!ast_context()->IsEffect()) Drop(1);
       ast_context()->ReturnValue(result);
       return true;
@@ -9335,7 +9366,7 @@
       HValue* index = BuildArrayIndexOf(receiver, search_element, kind, mode);
 
       if (!ast_context()->IsEffect()) Push(index);
-      Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+      Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
       if (!ast_context()->IsEffect()) Drop(1);
       ast_context()->ReturnValue(index);
       return true;
@@ -9486,7 +9517,7 @@
   }
   Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
   Handle<Object> call_data_obj(api_call_info->data(), isolate());
-  bool call_data_undefined = call_data_obj->IsUndefined();
+  bool call_data_undefined = call_data_obj->IsUndefined(isolate());
   HValue* call_data = Add<HConstant>(call_data_obj);
   ApiFunction fun(v8::ToCData<Address>(api_call_info->callback()));
   ExternalReference ref = ExternalReference(&fun,
@@ -9532,7 +9563,7 @@
       HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
     known_function =
         Handle<JSFunction>::cast(HConstant::cast(function)->handle(isolate()));
-    if (TryInlineBuiltinMethodCall(expr, known_function, Handle<Map>(),
+    if (TryInlineBuiltinMethodCall(known_function, Handle<Map>(), expr->id(),
                                    args_count_no_receiver)) {
       if (FLAG_trace_inlining) {
         PrintF("Inlining builtin ");
@@ -9933,7 +9964,7 @@
       CHECK_ALIVE(VisitExpressions(expr->arguments()));
 
       Handle<Map> map = maps->length() == 1 ? maps->first() : Handle<Map>();
-      if (TryInlineBuiltinMethodCall(expr, known_function, map,
+      if (TryInlineBuiltinMethodCall(known_function, map, expr->id(),
                                      expr->arguments()->length())) {
         if (FLAG_trace_inlining) {
           PrintF("Inlining builtin ");
@@ -11627,18 +11658,22 @@
   return ast_context()->ReturnControl(instr, expr->id());
 }
 
+namespace {
 
-static bool IsLiteralCompareBool(Isolate* isolate,
-                                 HValue* left,
-                                 Token::Value op,
-                                 HValue* right) {
+bool IsLiteralCompareStrict(Isolate* isolate, HValue* left, Token::Value op,
+                            HValue* right) {
   return op == Token::EQ_STRICT &&
-      ((left->IsConstant() &&
-          HConstant::cast(left)->handle(isolate)->IsBoolean()) ||
-       (right->IsConstant() &&
-           HConstant::cast(right)->handle(isolate)->IsBoolean()));
+         ((left->IsConstant() &&
+           !HConstant::cast(left)->handle(isolate)->IsNumber() &&
+           !HConstant::cast(left)->handle(isolate)->IsSimd128Value() &&
+           !HConstant::cast(left)->handle(isolate)->IsString()) ||
+          (right->IsConstant() &&
+           !HConstant::cast(right)->handle(isolate)->IsNumber() &&
+           !HConstant::cast(right)->handle(isolate)->IsSimd128Value() &&
+           !HConstant::cast(right)->handle(isolate)->IsString()));
 }
 
+}  // namespace
 
 void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
   DCHECK(!HasStackOverflow());
@@ -11684,7 +11719,7 @@
   HValue* left = Pop();
   Token::Value op = expr->op();
 
-  if (IsLiteralCompareBool(isolate(), left, op, right)) {
+  if (IsLiteralCompareStrict(isolate(), left, op, right)) {
     HCompareObjectEqAndBranch* result =
         New<HCompareObjectEqAndBranch>(left, right);
     return ast_context()->ReturnControl(result, expr->id());
@@ -12169,9 +12204,9 @@
             double_box, HObjectAccess::ForHeapNumberValue(), double_value);
         value_instruction = double_box;
       } else if (representation.IsSmi()) {
-        value_instruction = value->IsUninitialized()
-            ? graph()->GetConstant0()
-            : Add<HConstant>(value);
+        value_instruction = value->IsUninitialized(isolate())
+                                ? graph()->GetConstant0()
+                                : Add<HConstant>(value);
         // Ensure that value is stored as smi.
         access = access.WithRepresentation(representation);
       } else {
@@ -12635,38 +12670,6 @@
 }
 
 
-void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
-    CallRuntime* call) {
-  DCHECK(call->arguments()->length() == 3);
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
-  HValue* string = Pop();
-  HValue* value = Pop();
-  HValue* index = Pop();
-  Add<HSeqStringSetChar>(String::ONE_BYTE_ENCODING, string,
-                         index, value);
-  Add<HSimulate>(call->id(), FIXED_SIMULATE);
-  return ast_context()->ReturnValue(graph()->GetConstantUndefined());
-}
-
-
-void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
-    CallRuntime* call) {
-  DCHECK(call->arguments()->length() == 3);
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
-  HValue* string = Pop();
-  HValue* value = Pop();
-  HValue* index = Pop();
-  Add<HSeqStringSetChar>(String::TWO_BYTE_ENCODING, string,
-                         index, value);
-  Add<HSimulate>(call->id(), FIXED_SIMULATE);
-  return ast_context()->ReturnValue(graph()->GetConstantUndefined());
-}
-
-
 // Fast support for charCodeAt(n).
 void HOptimizedGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
   DCHECK(call->arguments()->length() == 2);
@@ -12689,20 +12692,6 @@
 }
 
 
-// Fast support for string.charAt(n) and string[n].
-void HOptimizedGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
-  DCHECK(call->arguments()->length() == 2);
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
-  HValue* index = Pop();
-  HValue* string = Pop();
-  HInstruction* char_code = BuildStringCharCodeAt(string, index);
-  AddInstruction(char_code);
-  HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
-  return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
 // Fast support for SubString.
 void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
   DCHECK_EQ(3, call->arguments()->length());
@@ -12784,17 +12773,6 @@
 }
 
 
-void HOptimizedGraphBuilder::GenerateConstructDouble(CallRuntime* call) {
-  DCHECK_EQ(2, call->arguments()->length());
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
-  HValue* lo = Pop();
-  HValue* hi = Pop();
-  HInstruction* result = NewUncasted<HConstructDouble>(hi, lo);
-  return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
 // Construct a RegExp exec result with two in-object properties.
 void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
   DCHECK_EQ(3, call->arguments()->length());
@@ -12848,15 +12826,6 @@
 }
 
 
-void HOptimizedGraphBuilder::GenerateMathLogRT(CallRuntime* call) {
-  DCHECK(call->arguments()->length() == 1);
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  HValue* value = Pop();
-  HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathLog);
-  return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
 void HOptimizedGraphBuilder::GenerateFixedArrayGet(CallRuntime* call) {
   DCHECK(call->arguments()->length() == 2);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -13385,12 +13354,14 @@
 void HTracer::TraceCompilation(CompilationInfo* info) {
   Tag tag(this, "compilation");
   std::string name;
-  Object* source_name = info->script()->name();
-  if (source_name->IsString()) {
-    String* str = String::cast(source_name);
-    if (str->length() > 0) {
-      name.append(str->ToCString().get());
-      name.append(":");
+  if (info->parse_info()) {
+    Object* source_name = info->script()->name();
+    if (source_name->IsString()) {
+      String* str = String::cast(source_name);
+      if (str->length() > 0) {
+        name.append(str->ToCString().get());
+        name.append(":");
+      }
     }
   }
   base::SmartArrayPointer<char> method_name = info->GetDebugName();
@@ -13583,10 +13554,11 @@
       int assigned_reg = op->index();
       if (op->IsDoubleRegister()) {
         trace_.Add(" \"%s\"",
-                   DoubleRegister::from_code(assigned_reg).ToString());
+                   GetRegConfig()->GetDoubleRegisterName(assigned_reg));
       } else {
         DCHECK(op->IsRegister());
-        trace_.Add(" \"%s\"", Register::from_code(assigned_reg).ToString());
+        trace_.Add(" \"%s\"",
+                   GetRegConfig()->GetGeneralRegisterName(assigned_reg));
       }
     } else if (range->IsSpilled()) {
       LOperand* op = range->TopLevel()->GetSpillOperand();
diff --git a/src/crankshaft/hydrogen.h b/src/crankshaft/hydrogen.h
index 3811773..d0f757b 100644
--- a/src/crankshaft/hydrogen.h
+++ b/src/crankshaft/hydrogen.h
@@ -1333,7 +1333,7 @@
             class P7, class P8, class P9>
   HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7,
                             P8 p8, P9 p9) {
-    return AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7, p8, p8));
+    return AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7, p8, p9));
   }
 
   template <class I, class P1, class P2, class P3, class P4, class P5, class P6,
@@ -2241,11 +2241,14 @@
   TestContext* inlined_test_context() const {
     return function_state()->test_context();
   }
+  Handle<JSFunction> current_closure() const {
+    return current_info()->closure();
+  }
   Handle<SharedFunctionInfo> current_shared_info() const {
     return current_info()->shared_info();
   }
   TypeFeedbackVector* current_feedback_vector() const {
-    return current_shared_info()->feedback_vector();
+    return current_closure()->feedback_vector();
   }
   void ClearInlinedTestContext() {
     function_state()->ClearInlinedTestContext();
@@ -2264,9 +2267,6 @@
   F(NewObject)                         \
   F(ValueOf)                           \
   F(StringCharFromCode)                \
-  F(StringCharAt)                      \
-  F(OneByteSeqStringSetChar)           \
-  F(TwoByteSeqStringSetChar)           \
   F(ToInteger)                         \
   F(ToName)                            \
   F(ToObject)                          \
@@ -2296,10 +2296,8 @@
   /* ArrayBuffer */                    \
   F(ArrayBufferGetByteLength)          \
   /* Maths */                          \
-  F(ConstructDouble)                   \
   F(DoubleHi)                          \
   F(DoubleLo)                          \
-  F(MathLogRT)                         \
   /* ES6 Collections */                \
   F(MapClear)                          \
   F(MapInitialize)                     \
@@ -2362,21 +2360,19 @@
   void Bind(Variable* var, HValue* value) { environment()->Bind(var, value); }
   bool IsEligibleForEnvironmentLivenessAnalysis(Variable* var,
                                                 int index,
-                                                HValue* value,
                                                 HEnvironment* env) {
     if (!FLAG_analyze_environment_liveness) return false;
     // |this| and |arguments| are always live; zapping parameters isn't
     // safe because function.arguments can inspect them at any time.
     return !var->is_this() &&
            !var->is_arguments() &&
-           !value->IsArgumentsObject() &&
            env->is_local_index(index);
   }
   void BindIfLive(Variable* var, HValue* value) {
     HEnvironment* env = environment();
     int index = env->IndexFor(var);
     env->Bind(index, value);
-    if (IsEligibleForEnvironmentLivenessAnalysis(var, index, value, env)) {
+    if (IsEligibleForEnvironmentLivenessAnalysis(var, index, env)) {
       HEnvironmentMarker* bind =
           Add<HEnvironmentMarker>(HEnvironmentMarker::BIND, index);
       USE(bind);
@@ -2388,8 +2384,7 @@
   HValue* LookupAndMakeLive(Variable* var) {
     HEnvironment* env = environment();
     int index = env->IndexFor(var);
-    HValue* value = env->Lookup(index);
-    if (IsEligibleForEnvironmentLivenessAnalysis(var, index, value, env)) {
+    if (IsEligibleForEnvironmentLivenessAnalysis(var, index, env)) {
       HEnvironmentMarker* lookup =
           Add<HEnvironmentMarker>(HEnvironmentMarker::LOOKUP, index);
       USE(lookup);
@@ -2397,7 +2392,7 @@
       lookup->set_closure(env->closure());
 #endif
     }
-    return value;
+    return env->Lookup(index);
   }
 
   // The value of the arguments object is allowed in some but not most value
@@ -2477,8 +2472,10 @@
                        HValue* implicit_return_value);
   bool TryInlineIndirectCall(Handle<JSFunction> function, Call* expr,
                              int arguments_count);
-  bool TryInlineBuiltinMethodCall(Call* expr, Handle<JSFunction> function,
-                                  Handle<Map> receiver_map,
+  bool TryInlineBuiltinGetterCall(Handle<JSFunction> function,
+                                  Handle<Map> receiver_map, BailoutId ast_id);
+  bool TryInlineBuiltinMethodCall(Handle<JSFunction> function,
+                                  Handle<Map> receiver_map, BailoutId ast_id,
                                   int args_count_no_receiver);
   bool TryInlineBuiltinFunctionCall(Call* expr);
   enum ApiCallType {
@@ -2622,20 +2619,6 @@
       return false;
     }
 
-    bool IsJSArrayBufferViewFieldAccessor() {
-      int offset;  // unused
-      return Accessors::IsJSArrayBufferViewFieldAccessor(map_, name_, &offset);
-    }
-
-    bool GetJSArrayBufferViewFieldAccess(HObjectAccess* access) {
-      int offset;
-      if (Accessors::IsJSArrayBufferViewFieldAccessor(map_, name_, &offset)) {
-        *access = HObjectAccess::ForMapAndOffset(map_, offset);
-        return true;
-      }
-      return false;
-    }
-
     bool has_holder() { return !holder_.is_null(); }
     bool IsLoad() const { return access_type_ == LOAD; }
 
diff --git a/src/crankshaft/ia32/lithium-codegen-ia32.cc b/src/crankshaft/ia32/lithium-codegen-ia32.cc
index fa0a897..e4854e7 100644
--- a/src/crankshaft/ia32/lithium-codegen-ia32.cc
+++ b/src/crankshaft/ia32/lithium-codegen-ia32.cc
@@ -15,7 +15,6 @@
 #include "src/ia32/frames-ia32.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
 
 namespace v8 {
 namespace internal {
@@ -734,7 +733,7 @@
                                             !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
-    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+    if (FLAG_trace_deopt || isolate()->is_profiling() ||
         jump_table_.is_empty() ||
         !table_entry.IsEquivalentTo(jump_table_.last())) {
       jump_table_.Add(table_entry, zone());
@@ -811,7 +810,6 @@
 void LCodeGen::RecordAndWritePosition(int position) {
   if (position == RelocInfo::kNoPosition) return;
   masm()->positions_recorder()->RecordPosition(position);
-  masm()->positions_recorder()->WriteRecordedPositions();
 }
 
 
@@ -2317,10 +2315,10 @@
   DeoptimizeIf(equal, instr, Deoptimizer::kProxy);
 
   __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
-  __ cmp(object_prototype, prototype);
-  EmitTrueBranch(instr, equal);
   __ cmp(object_prototype, factory()->null_value());
   EmitFalseBranch(instr, equal);
+  __ cmp(object_prototype, prototype);
+  EmitTrueBranch(instr, equal);
   __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
   __ jmp(&loop);
 }
@@ -2418,15 +2416,12 @@
 
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->global_object())
-             .is(LoadDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->result()).is(eax));
 
-  __ mov(LoadDescriptor::NameRegister(), instr->name());
   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
-                        isolate(), instr->typeof_mode(), PREMONOMORPHIC)
-                        .code();
+  Handle<Code> ic =
+      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
+          .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2541,10 +2536,7 @@
 
   __ mov(LoadDescriptor::NameRegister(), instr->name());
   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
-                        isolate(), NOT_INSIDE_TYPEOF,
-                        instr->hydrogen()->initialization_state())
-                        .code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2775,13 +2767,9 @@
   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-  }
+  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
 
-  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
-                        isolate(), instr->hydrogen()->initialization_state())
-                        .code();
+  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3407,31 +3395,18 @@
 
 
 void LCodeGen::DoMathLog(LMathLog* instr) {
-  DCHECK(instr->value()->Equals(instr->result()));
-  XMMRegister input_reg = ToDoubleRegister(instr->value());
-  XMMRegister xmm_scratch = double_scratch0();
-  Label positive, done, zero;
-  __ xorps(xmm_scratch, xmm_scratch);
-  __ ucomisd(input_reg, xmm_scratch);
-  __ j(above, &positive, Label::kNear);
-  __ j(not_carry, &zero, Label::kNear);
-  __ pcmpeqd(input_reg, input_reg);
-  __ jmp(&done, Label::kNear);
-  __ bind(&zero);
-  ExternalReference ninf =
-      ExternalReference::address_of_negative_infinity();
-  __ movsd(input_reg, Operand::StaticVariable(ninf));
-  __ jmp(&done, Label::kNear);
-  __ bind(&positive);
-  __ fldln2();
-  __ sub(Operand(esp), Immediate(kDoubleSize));
-  __ movsd(Operand(esp, 0), input_reg);
-  __ fld_d(Operand(esp, 0));
-  __ fyl2x();
+  XMMRegister input = ToDoubleRegister(instr->value());
+  XMMRegister result = ToDoubleRegister(instr->result());
+  // Pass one double as argument on the stack.
+  __ PrepareCallCFunction(2, eax);
+  __ movsd(Operand(esp, 0 * kDoubleSize), input);
+  __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 2);
+  // Return value is in st(0) on ia32.
+  // Store it into the result register.
+  __ sub(esp, Immediate(kDoubleSize));
   __ fstp_d(Operand(esp, 0));
-  __ movsd(input_reg, Operand(esp, 0));
-  __ add(Operand(esp), Immediate(kDoubleSize));
-  __ bind(&done);
+  __ movsd(result, Operand(esp, 0));
+  __ add(esp, Immediate(kDoubleSize));
 }
 
 
@@ -3442,15 +3417,49 @@
   __ Lzcnt(result, input);
 }
 
+void LCodeGen::DoMathCos(LMathCos* instr) {
+  XMMRegister input = ToDoubleRegister(instr->value());
+  XMMRegister result = ToDoubleRegister(instr->result());
+  // Pass one double as argument on the stack.
+  __ PrepareCallCFunction(2, eax);
+  __ movsd(Operand(esp, 0 * kDoubleSize), input);
+  __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 2);
+  // Return value is in st(0) on ia32.
+  // Store it into the result register.
+  __ sub(esp, Immediate(kDoubleSize));
+  __ fstp_d(Operand(esp, 0));
+  __ movsd(result, Operand(esp, 0));
+  __ add(esp, Immediate(kDoubleSize));
+}
+
+void LCodeGen::DoMathSin(LMathSin* instr) {
+  XMMRegister input = ToDoubleRegister(instr->value());
+  XMMRegister result = ToDoubleRegister(instr->result());
+  // Pass one double as argument on the stack.
+  __ PrepareCallCFunction(2, eax);
+  __ movsd(Operand(esp, 0 * kDoubleSize), input);
+  __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 2);
+  // Return value is in st(0) on ia32.
+  // Store it into the result register.
+  __ sub(esp, Immediate(kDoubleSize));
+  __ fstp_d(Operand(esp, 0));
+  __ movsd(result, Operand(esp, 0));
+  __ add(esp, Immediate(kDoubleSize));
+}
 
 void LCodeGen::DoMathExp(LMathExp* instr) {
   XMMRegister input = ToDoubleRegister(instr->value());
   XMMRegister result = ToDoubleRegister(instr->result());
-  XMMRegister temp0 = double_scratch0();
-  Register temp1 = ToRegister(instr->temp1());
-  Register temp2 = ToRegister(instr->temp2());
-
-  MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
+  // Pass one double as argument on the stack.
+  __ PrepareCallCFunction(2, eax);
+  __ movsd(Operand(esp, 0 * kDoubleSize), input);
+  __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 2);
+  // Return value is in st(0) on ia32.
+  // Store it into the result register.
+  __ sub(esp, Immediate(kDoubleSize));
+  __ fstp_d(Operand(esp, 0));
+  __ movsd(result, Operand(esp, 0));
+  __ add(esp, Immediate(kDoubleSize));
 }
 
 void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
@@ -3465,7 +3474,9 @@
 #endif
   if (FLAG_code_comments) {
     if (actual.is_reg()) {
-      Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+      Comment(";;; PrepareForTailCall, actual: %s {",
+              RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+                  actual.reg().code()));
     } else {
       Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
     }
@@ -3535,14 +3546,7 @@
   DCHECK(ToRegister(instr->result()).is(eax));
 
   __ Move(eax, Immediate(instr->arity()));
-  if (instr->arity() == 1) {
-    // We only need the allocation site for the case we have a length argument.
-    // The case may bail out to the runtime, which will determine the correct
-    // elements kind with the site.
-    __ mov(ebx, instr->hydrogen()->site());
-  } else {
-    __ mov(ebx, isolate()->factory()->undefined_value());
-  }
+  __ mov(ebx, instr->hydrogen()->site());
 
   ElementsKind kind = instr->hydrogen()->elements_kind();
   AllocationSiteOverrideMode override_mode =
@@ -3576,7 +3580,7 @@
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
     __ bind(&done);
   } else {
-    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    ArrayNArgumentsConstructorStub stub(isolate());
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
@@ -3706,14 +3710,12 @@
   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-  }
+  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
 
   __ mov(StoreDescriptor::NameRegister(), instr->name());
-  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
-                        isolate(), instr->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+  Handle<Code> ic =
+      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
+          .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3883,13 +3885,11 @@
   DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-  }
+  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
 
   Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
-                        isolate(), instr->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+                        isolate(), instr->language_mode())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -4030,8 +4030,7 @@
     DCHECK(object_reg.is(eax));
     PushSafepointRegistersScope scope(this);
     __ mov(ebx, to_map);
-    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
-    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
     __ CallStub(&stub);
     RecordSafepointWithLazyDeopt(instr,
         RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -4823,25 +4822,6 @@
 }
 
 
-void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
-  Register hi_reg = ToRegister(instr->hi());
-  Register lo_reg = ToRegister(instr->lo());
-  XMMRegister result_reg = ToDoubleRegister(instr->result());
-
-  if (CpuFeatures::IsSupported(SSE4_1)) {
-    CpuFeatureScope scope2(masm(), SSE4_1);
-    __ movd(result_reg, lo_reg);
-    __ pinsrd(result_reg, hi_reg, 1);
-  } else {
-    XMMRegister xmm_scratch = double_scratch0();
-    __ movd(result_reg, hi_reg);
-    __ psllq(result_reg, 32);
-    __ movd(xmm_scratch, lo_reg);
-    __ orps(result_reg, xmm_scratch);
-  }
-}
-
-
 void LCodeGen::DoAllocate(LAllocate* instr) {
   class DeferredAllocate final : public LDeferredCode {
    public:
diff --git a/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc b/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc
index c3284df..be8251c 100644
--- a/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc
@@ -167,8 +167,7 @@
 
 Register LGapResolver::GetFreeRegisterNot(Register reg) {
   int skip_index = reg.is(no_reg) ? -1 : reg.code();
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
     int code = config->GetAllocatableGeneralCode(i);
     if (source_uses_[code] == 0 && destination_uses_[code] > 0 &&
@@ -183,8 +182,7 @@
 bool LGapResolver::HasBeenReset() {
   if (!moves_.is_empty()) return false;
   if (spilled_register_ >= 0) return false;
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
     int code = config->GetAllocatableGeneralCode(i);
     if (source_uses_[code] != 0) return false;
@@ -238,8 +236,7 @@
 
   // 3. Prefer to spill a register that is not used in any remaining move
   // because it will not need to be restored until the end.
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
     int code = config->GetAllocatableGeneralCode(i);
     if (source_uses_[code] == 0 && destination_uses_[code] == 0) {
diff --git a/src/crankshaft/ia32/lithium-ia32.cc b/src/crankshaft/ia32/lithium-ia32.cc
index 0bfdb0d..501ff47 100644
--- a/src/crankshaft/ia32/lithium-ia32.cc
+++ b/src/crankshaft/ia32/lithium-ia32.cc
@@ -1093,6 +1093,8 @@
 
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   switch (instr->op()) {
+    case kMathCos:
+      return DoMathCos(instr);
     case kMathFloor:
       return DoMathFloor(instr);
     case kMathRound:
@@ -1111,6 +1113,8 @@
       return DoMathPowHalf(instr);
     case kMathClz32:
       return DoMathClz32(instr);
+    case kMathSin:
+      return DoMathSin(instr);
     default:
       UNREACHABLE();
       return NULL;
@@ -1177,15 +1181,25 @@
   return DefineAsRegister(result);
 }
 
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return MarkAsCall(DefineSameAsFirst(new (zone()) LMathCos(input)), instr);
+}
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return MarkAsCall(DefineSameAsFirst(new (zone()) LMathSin(input)), instr);
+}
 
 LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
   DCHECK(instr->representation().IsDouble());
   DCHECK(instr->value()->representation().IsDouble());
-  LOperand* value = UseTempRegister(instr->value());
-  LOperand* temp1 = TempRegister();
-  LOperand* temp2 = TempRegister();
-  LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
-  return DefineAsRegister(result);
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return MarkAsCall(DefineSameAsFirst(new (zone()) LMathExp(input)), instr);
 }
 
 
@@ -1980,13 +1994,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
-  LOperand* lo = UseRegister(instr->lo());
-  LOperand* hi = UseRegister(instr->hi());
-  return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
-}
-
-
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
   LOperand* context = info()->IsStub() ? UseFixed(instr->context(), esi) : NULL;
   LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
@@ -2018,15 +2025,9 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* global_object =
-      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
 
-  LLoadGlobalGeneric* result =
-      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+  LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2074,10 +2075,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
   LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
       context, object, vector);
   return MarkAsCall(DefineFixed(result, eax), instr);
@@ -2147,10 +2145,7 @@
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
   LLoadKeyedGeneric* result =
       new(zone()) LLoadKeyedGeneric(context, object, key, vector);
   return MarkAsCall(DefineFixed(result, eax), instr);
@@ -2234,12 +2229,8 @@
   DCHECK(instr->key()->representation().IsTagged());
   DCHECK(instr->value()->representation().IsTagged());
 
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
-    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
-  }
+  LOperand* slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+  LOperand* vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
 
   LStoreKeyedGeneric* result = new (zone())
       LStoreKeyedGeneric(context, object, key, value, slot, vector);
@@ -2353,12 +2344,8 @@
   LOperand* object =
       UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
   LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
-    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
-  }
+  LOperand* slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+  LOperand* vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
 
   LStoreNamedGeneric* result =
       new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
diff --git a/src/crankshaft/ia32/lithium-ia32.h b/src/crankshaft/ia32/lithium-ia32.h
index d1d5a06..7dd22e0 100644
--- a/src/crankshaft/ia32/lithium-ia32.h
+++ b/src/crankshaft/ia32/lithium-ia32.h
@@ -57,7 +57,6 @@
   V(ConstantI)                               \
   V(ConstantS)                               \
   V(ConstantT)                               \
-  V(ConstructDouble)                         \
   V(Context)                                 \
   V(DebugBreak)                              \
   V(DeclareGlobals)                          \
@@ -102,6 +101,7 @@
   V(LoadRoot)                                \
   V(MathAbs)                                 \
   V(MathClz32)                               \
+  V(MathCos)                                 \
   V(MathExp)                                 \
   V(MathFloorD)                              \
   V(MathFloorI)                              \
@@ -111,6 +111,7 @@
   V(MathPowHalf)                             \
   V(MathRoundD)                              \
   V(MathRoundI)                              \
+  V(MathSin)                                 \
   V(MathSqrt)                                \
   V(MaybeGrowElements)                       \
   V(ModByConstI)                             \
@@ -912,21 +913,29 @@
   DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
 };
 
-
-class LMathExp final : public LTemplateInstruction<1, 1, 2> {
+class LMathCos final : public LTemplateInstruction<1, 1, 0> {
  public:
-  LMathExp(LOperand* value,
-           LOperand* temp1,
-           LOperand* temp2) {
-    inputs_[0] = value;
-    temps_[0] = temp1;
-    temps_[1] = temp2;
-    ExternalReference::InitializeMathExpData();
-  }
+  explicit LMathCos(LOperand* value) { inputs_[0] = value; }
 
   LOperand* value() { return inputs_[0]; }
-  LOperand* temp1() { return temps_[0]; }
-  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+class LMathSin final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathSin(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+class LMathExp final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathExp(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
 };
@@ -1589,18 +1598,14 @@
   DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
 };
 
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
  public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
-                     LOperand* vector) {
+  LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
     inputs_[0] = context;
-    inputs_[1] = global_object;
     temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
-  LOperand* global_object() { return inputs_[1]; }
   LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
@@ -2355,20 +2360,6 @@
 };
 
 
-class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
- public:
-  LConstructDouble(LOperand* hi, LOperand* lo) {
-    inputs_[0] = hi;
-    inputs_[1] = lo;
-  }
-
-  LOperand* hi() { return inputs_[0]; }
-  LOperand* lo() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
-};
-
-
 class LAllocate final : public LTemplateInstruction<1, 2, 1> {
  public:
   LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
@@ -2552,6 +2543,8 @@
   LInstruction* DoMathFround(HUnaryMathOperation* instr);
   LInstruction* DoMathAbs(HUnaryMathOperation* instr);
   LInstruction* DoMathLog(HUnaryMathOperation* instr);
+  LInstruction* DoMathCos(HUnaryMathOperation* instr);
+  LInstruction* DoMathSin(HUnaryMathOperation* instr);
   LInstruction* DoMathExp(HUnaryMathOperation* instr);
   LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
   LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
diff --git a/src/crankshaft/lithium-allocator.cc b/src/crankshaft/lithium-allocator.cc
index 6155dc0..d17cd27 100644
--- a/src/crankshaft/lithium-allocator.cc
+++ b/src/crankshaft/lithium-allocator.cc
@@ -13,6 +13,8 @@
 namespace v8 {
 namespace internal {
 
+const auto GetRegConfig = RegisterConfiguration::Crankshaft;
+
 static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
   return a.Value() < b.Value() ? a : b;
 }
@@ -940,7 +942,7 @@
 
         if (instr->ClobbersRegisters()) {
           for (int i = 0; i < Register::kNumRegisters; ++i) {
-            if (Register::from_code(i).IsAllocatable()) {
+            if (GetRegConfig()->IsAllocatableGeneralCode(i)) {
               if (output == NULL || !output->IsRegister() ||
                   output->index() != i) {
                 LiveRange* range = FixedLiveRangeFor(i);
@@ -953,7 +955,7 @@
 
         if (instr->ClobbersDoubleRegisters(isolate())) {
           for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
-            if (DoubleRegister::from_code(i).IsAllocatable()) {
+            if (GetRegConfig()->IsAllocatableDoubleCode(i)) {
               if (output == NULL || !output->IsDoubleRegister() ||
                   output->index() != i) {
                 LiveRange* range = FixedDoubleLiveRangeFor(i);
@@ -1460,12 +1462,8 @@
 
 void LAllocator::AllocateGeneralRegisters() {
   LAllocatorPhase phase("L_Allocate general registers", this);
-  num_registers_ =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
-          ->num_allocatable_general_registers();
-  allocatable_register_codes_ =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
-          ->allocatable_general_codes();
+  num_registers_ = GetRegConfig()->num_allocatable_general_registers();
+  allocatable_register_codes_ = GetRegConfig()->allocatable_general_codes();
   mode_ = GENERAL_REGISTERS;
   AllocateRegisters();
 }
@@ -1473,12 +1471,8 @@
 
 void LAllocator::AllocateDoubleRegisters() {
   LAllocatorPhase phase("L_Allocate double registers", this);
-  num_registers_ =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
-          ->num_allocatable_double_registers();
-  allocatable_register_codes_ =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
-          ->allocatable_double_codes();
+  num_registers_ = GetRegConfig()->num_allocatable_double_registers();
+  allocatable_register_codes_ = GetRegConfig()->allocatable_double_codes();
   mode_ = DOUBLE_REGISTERS;
   AllocateRegisters();
 }
@@ -1596,9 +1590,9 @@
 
 const char* LAllocator::RegisterName(int allocation_index) {
   if (mode_ == GENERAL_REGISTERS) {
-    return Register::from_code(allocation_index).ToString();
+    return GetRegConfig()->GetGeneralRegisterName(allocation_index);
   } else {
-    return DoubleRegister::from_code(allocation_index).ToString();
+    return GetRegConfig()->GetDoubleRegisterName(allocation_index);
   }
 }
 
diff --git a/src/crankshaft/lithium.cc b/src/crankshaft/lithium.cc
index d34b04f..4b3e0bc 100644
--- a/src/crankshaft/lithium.cc
+++ b/src/crankshaft/lithium.cc
@@ -40,6 +40,7 @@
 namespace v8 {
 namespace internal {
 
+const auto GetRegConfig = RegisterConfiguration::Crankshaft;
 
 void LOperand::PrintTo(StringStream* stream) {
   LUnallocated* unalloc = NULL;
@@ -63,7 +64,7 @@
             stream->Add("(=invalid_reg#%d)", reg_index);
           } else {
             const char* register_name =
-                Register::from_code(reg_index).ToString();
+                GetRegConfig()->GetGeneralRegisterName(reg_index);
             stream->Add("(=%s)", register_name);
           }
           break;
@@ -74,7 +75,7 @@
             stream->Add("(=invalid_double_reg#%d)", reg_index);
           } else {
             const char* double_register_name =
-                DoubleRegister::from_code(reg_index).ToString();
+                GetRegConfig()->GetDoubleRegisterName(reg_index);
             stream->Add("(=%s)", double_register_name);
           }
           break;
@@ -110,7 +111,8 @@
       if (reg_index < 0 || reg_index >= Register::kNumRegisters) {
         stream->Add("(=invalid_reg#%d|R)", reg_index);
       } else {
-        stream->Add("[%s|R]", Register::from_code(reg_index).ToString());
+        stream->Add("[%s|R]",
+                    GetRegConfig()->GetGeneralRegisterName(reg_index));
       }
       break;
     }
@@ -119,7 +121,7 @@
       if (reg_index < 0 || reg_index >= DoubleRegister::kMaxNumRegisters) {
         stream->Add("(=invalid_double_reg#%d|R)", reg_index);
       } else {
-        stream->Add("[%s|R]", DoubleRegister::from_code(reg_index).ToString());
+        stream->Add("[%s|R]", GetRegConfig()->GetDoubleRegisterName(reg_index));
       }
       break;
     }
@@ -469,8 +471,7 @@
                                                  jit_handler_data));
 
     CodeGenerator::PrintCode(code, info());
-    DCHECK(!(info()->isolate()->serializer_enabled() &&
-             info()->GetMustNotHaveEagerFrame() &&
+    DCHECK(!(info()->GetMustNotHaveEagerFrame() &&
              generator.NeedsEagerFrame()));
     return code;
   }
diff --git a/src/crankshaft/mips/lithium-codegen-mips.cc b/src/crankshaft/mips/lithium-codegen-mips.cc
index bdc5c64..30a59fc 100644
--- a/src/crankshaft/mips/lithium-codegen-mips.cc
+++ b/src/crankshaft/mips/lithium-codegen-mips.cc
@@ -34,8 +34,6 @@
 #include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
-
 
 namespace v8 {
 namespace internal {
@@ -801,7 +799,7 @@
                                             !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
-    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+    if (FLAG_trace_deopt || isolate()->is_profiling() ||
         jump_table_.is_empty() ||
         !table_entry.IsEquivalentTo(jump_table_.last())) {
       jump_table_.Add(table_entry, zone());
@@ -877,7 +875,6 @@
 void LCodeGen::RecordAndWritePosition(int position) {
   if (position == RelocInfo::kNoPosition) return;
   masm()->positions_recorder()->RecordPosition(position);
-  masm()->positions_recorder()->WriteRecordedPositions();
 }
 
 
@@ -2417,9 +2414,9 @@
                Operand(JS_PROXY_TYPE));
 
   __ lw(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
-  EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
   __ LoadRoot(at, Heap::kNullValueRootIndex);
   EmitFalseBranch(instr, eq, object_prototype, Operand(at));
+  EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
   __ Branch(USE_DELAY_SLOT, &loop);
   __ lw(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
 }
@@ -2515,15 +2512,12 @@
 
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->global_object())
-             .is(LoadDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->result()).is(v0));
 
-  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
-                        isolate(), instr->typeof_mode(), PREMONOMORPHIC)
-                        .code();
+  Handle<Code> ic =
+      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
+          .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2622,10 +2616,7 @@
   // Name is always in a2.
   __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
-                        isolate(), NOT_INSIDE_TYPEOF,
-                        instr->hydrogen()->initialization_state())
-                        .code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2930,13 +2921,9 @@
   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-  }
+  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
 
-  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
-                        isolate(), instr->hydrogen()->initialization_state())
-                        .code();
+  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3499,26 +3486,32 @@
   }
 }
 
+void LCodeGen::DoMathCos(LMathCos* instr) {
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
+
+void LCodeGen::DoMathSin(LMathSin* instr) {
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
 
 void LCodeGen::DoMathExp(LMathExp* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->value());
-  DoubleRegister result = ToDoubleRegister(instr->result());
-  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
-  DoubleRegister double_scratch2 = double_scratch0();
-  Register temp1 = ToRegister(instr->temp1());
-  Register temp2 = ToRegister(instr->temp2());
-
-  MathExpGenerator::EmitMathExp(
-      masm(), input, result, double_scratch1, double_scratch2,
-      temp1, temp2, scratch0());
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
 }
 
 
 void LCodeGen::DoMathLog(LMathLog* instr) {
   __ PrepareCallCFunction(0, 1, scratch0());
   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
-  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
-                   0, 1);
+  __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
 }
 
@@ -3541,7 +3534,9 @@
 #endif
   if (FLAG_code_comments) {
     if (actual.is_reg()) {
-      Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+      Comment(";;; PrepareForTailCall, actual: %s {",
+              RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+                  actual.reg().code()));
     } else {
       Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
     }
@@ -3648,14 +3643,8 @@
   DCHECK(ToRegister(instr->result()).is(v0));
 
   __ li(a0, Operand(instr->arity()));
-  if (instr->arity() == 1) {
-    // We only need the allocation site for the case we have a length argument.
-    // The case may bail out to the runtime, which will determine the correct
-    // elements kind with the site.
-    __ li(a2, instr->hydrogen()->site());
-  } else {
-    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
-  }
+  __ li(a2, instr->hydrogen()->site());
+
   ElementsKind kind = instr->hydrogen()->elements_kind();
   AllocationSiteOverrideMode override_mode =
       (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@@ -3687,7 +3676,7 @@
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
     __ bind(&done);
   } else {
-    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    ArrayNArgumentsConstructorStub stub(isolate());
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
@@ -3809,14 +3798,12 @@
   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-  }
+  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
 
   __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
-  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
-                        isolate(), instr->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+  Handle<Code> ic =
+      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
+          .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -4037,13 +4024,11 @@
   DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-  }
+  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
 
   Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
-                        isolate(), instr->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+                        isolate(), instr->language_mode())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -4179,8 +4164,7 @@
     DCHECK(ToRegister(instr->context()).is(cp));
     PushSafepointRegistersScope scope(this);
     __ li(a1, Operand(to_map));
-    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
-    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
     __ CallStub(&stub);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
@@ -5028,14 +5012,6 @@
 }
 
 
-void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
-  Register hi_reg = ToRegister(instr->hi());
-  Register lo_reg = ToRegister(instr->lo());
-  DoubleRegister result_reg = ToDoubleRegister(instr->result());
-  __ Move(result_reg, lo_reg, hi_reg);
-}
-
-
 void LCodeGen::DoAllocate(LAllocate* instr) {
   class DeferredAllocate final : public LDeferredCode {
    public:
diff --git a/src/crankshaft/mips/lithium-mips.cc b/src/crankshaft/mips/lithium-mips.cc
index 345694d..e706620 100644
--- a/src/crankshaft/mips/lithium-mips.cc
+++ b/src/crankshaft/mips/lithium-mips.cc
@@ -1074,6 +1074,10 @@
       return DoMathAbs(instr);
     case kMathLog:
       return DoMathLog(instr);
+    case kMathCos:
+      return DoMathCos(instr);
+    case kMathSin:
+      return DoMathSin(instr);
     case kMathExp:
       return DoMathExp(instr);
     case kMathSqrt:
@@ -1103,16 +1107,25 @@
   return DefineAsRegister(result);
 }
 
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseFixedDouble(instr->value(), f4);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), f4), instr);
+}
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseFixedDouble(instr->value(), f4);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), f4), instr);
+}
 
 LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
   DCHECK(instr->representation().IsDouble());
   DCHECK(instr->value()->representation().IsDouble());
-  LOperand* input = UseRegister(instr->value());
-  LOperand* temp1 = TempRegister();
-  LOperand* temp2 = TempRegister();
-  LOperand* double_temp = TempDoubleRegister();
-  LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
-  return DefineAsRegister(result);
+  LOperand* input = UseFixedDouble(instr->value(), f4);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), f4), instr);
 }
 
 
@@ -1921,13 +1934,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
-  LOperand* lo = UseRegister(instr->lo());
-  LOperand* hi = UseRegister(instr->hi());
-  return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
-}
-
-
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
   LOperand* context = info()->IsStub()
       ? UseFixed(instr->context(), cp)
@@ -1959,14 +1965,9 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* global_object =
-      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
-  LLoadGlobalGeneric* result =
-      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+
+  LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
   return MarkAsCall(DefineFixed(result, v0), instr);
 }
 
@@ -2010,10 +2011,7 @@
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
 
   LInstruction* result =
       DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), v0);
@@ -2085,10 +2083,7 @@
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
 
   LInstruction* result =
       DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
@@ -2150,12 +2145,8 @@
   DCHECK(instr->key()->representation().IsTagged());
   DCHECK(instr->value()->representation().IsTagged());
 
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
-    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
-  }
+  LOperand* slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+  LOperand* vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
 
   LStoreKeyedGeneric* result =
       new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
@@ -2244,12 +2235,8 @@
   LOperand* obj =
       UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
   LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
-    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
-  }
+  LOperand* slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+  LOperand* vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
 
   LStoreNamedGeneric* result =
       new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
diff --git a/src/crankshaft/mips/lithium-mips.h b/src/crankshaft/mips/lithium-mips.h
index ea5e792..f220a15 100644
--- a/src/crankshaft/mips/lithium-mips.h
+++ b/src/crankshaft/mips/lithium-mips.h
@@ -53,7 +53,6 @@
   V(ConstantI)                               \
   V(ConstantS)                               \
   V(ConstantT)                               \
-  V(ConstructDouble)                         \
   V(Context)                                 \
   V(DebugBreak)                              \
   V(DeclareGlobals)                          \
@@ -97,6 +96,8 @@
   V(LoadNamedField)                          \
   V(LoadNamedGeneric)                        \
   V(MathAbs)                                 \
+  V(MathCos)                                 \
+  V(MathSin)                                 \
   V(MathExp)                                 \
   V(MathClz32)                               \
   V(MathFloor)                               \
@@ -882,24 +883,29 @@
   DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
 };
 
-
-class LMathExp final : public LTemplateInstruction<1, 1, 3> {
+class LMathCos final : public LTemplateInstruction<1, 1, 0> {
  public:
-  LMathExp(LOperand* value,
-           LOperand* double_temp,
-           LOperand* temp1,
-           LOperand* temp2) {
-    inputs_[0] = value;
-    temps_[0] = temp1;
-    temps_[1] = temp2;
-    temps_[2] = double_temp;
-    ExternalReference::InitializeMathExpData();
-  }
+  explicit LMathCos(LOperand* value) { inputs_[0] = value; }
 
   LOperand* value() { return inputs_[0]; }
-  LOperand* temp1() { return temps_[0]; }
-  LOperand* temp2() { return temps_[1]; }
-  LOperand* double_temp() { return temps_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+class LMathSin final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathSin(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+class LMathExp final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathExp(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
 };
@@ -1531,18 +1537,14 @@
   DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
 };
 
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
  public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
-                     LOperand* vector) {
+  LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
     inputs_[0] = context;
-    inputs_[1] = global_object;
     temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
-  LOperand* global_object() { return inputs_[1]; }
   LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
@@ -2299,20 +2301,6 @@
 };
 
 
-class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
- public:
-  LConstructDouble(LOperand* hi, LOperand* lo) {
-    inputs_[0] = hi;
-    inputs_[1] = lo;
-  }
-
-  LOperand* hi() { return inputs_[0]; }
-  LOperand* lo() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
-};
-
-
 class LAllocate final : public LTemplateInstruction<1, 2, 2> {
  public:
   LAllocate(LOperand* context,
@@ -2503,6 +2491,8 @@
   LInstruction* DoMathFround(HUnaryMathOperation* instr);
   LInstruction* DoMathAbs(HUnaryMathOperation* instr);
   LInstruction* DoMathLog(HUnaryMathOperation* instr);
+  LInstruction* DoMathCos(HUnaryMathOperation* instr);
+  LInstruction* DoMathSin(HUnaryMathOperation* instr);
   LInstruction* DoMathExp(HUnaryMathOperation* instr);
   LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
   LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
diff --git a/src/crankshaft/mips64/lithium-codegen-mips64.cc b/src/crankshaft/mips64/lithium-codegen-mips64.cc
index d114e4f..41ed95e 100644
--- a/src/crankshaft/mips64/lithium-codegen-mips64.cc
+++ b/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -10,7 +10,6 @@
 #include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
 
 namespace v8 {
 namespace internal {
@@ -789,7 +788,7 @@
             entry, deopt_info, bailout_type, !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
-    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+    if (FLAG_trace_deopt || isolate()->is_profiling() ||
         jump_table_.is_empty() ||
         !table_entry->IsEquivalentTo(*jump_table_.last())) {
       jump_table_.Add(table_entry, zone());
@@ -865,7 +864,6 @@
 void LCodeGen::RecordAndWritePosition(int position) {
   if (position == RelocInfo::kNoPosition) return;
   masm()->positions_recorder()->RecordPosition(position);
-  masm()->positions_recorder()->WriteRecordedPositions();
 }
 
 
@@ -1167,7 +1165,7 @@
   DCHECK(!result.is(dividend) || !scratch.is(dividend));
 
   // If the divisor is 1, return the dividend.
-  if (divisor == 1) {
+  if (divisor == 1) {
     __ Move(result, dividend);
     return;
   }
@@ -2536,9 +2534,9 @@
                Operand(JS_PROXY_TYPE));
 
   __ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
-  EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
   __ LoadRoot(at, Heap::kNullValueRootIndex);
   EmitFalseBranch(instr, eq, object_prototype, Operand(at));
+  EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
   __ Branch(&loop, USE_DELAY_SLOT);
   __ ld(object_map, FieldMemOperand(object_prototype,
                                     HeapObject::kMapOffset));  // In delay slot.
@@ -2635,15 +2633,12 @@
 
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->global_object())
-            .is(LoadDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->result()).is(v0));
 
-  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
-                        isolate(), instr->typeof_mode(), PREMONOMORPHIC)
-                        .code();
+  Handle<Code> ic =
+      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
+          .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2757,10 +2752,7 @@
   // Name is always in a2.
   __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
-                        isolate(), NOT_INSIDE_TYPEOF,
-                        instr->hydrogen()->initialization_state())
-                        .code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3109,13 +3101,9 @@
   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-  }
+  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
 
-  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
-                        isolate(), instr->hydrogen()->initialization_state())
-                        .code();
+  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3701,26 +3689,32 @@
   }
 }
 
+void LCodeGen::DoMathCos(LMathCos* instr) {
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
+
+void LCodeGen::DoMathSin(LMathSin* instr) {
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
 
 void LCodeGen::DoMathExp(LMathExp* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->value());
-  DoubleRegister result = ToDoubleRegister(instr->result());
-  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
-  DoubleRegister double_scratch2 = double_scratch0();
-  Register temp1 = ToRegister(instr->temp1());
-  Register temp2 = ToRegister(instr->temp2());
-
-  MathExpGenerator::EmitMathExp(
-      masm(), input, result, double_scratch1, double_scratch2,
-      temp1, temp2, scratch0());
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
 }
 
 
 void LCodeGen::DoMathLog(LMathLog* instr) {
   __ PrepareCallCFunction(0, 1, scratch0());
   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
-  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
-                   0, 1);
+  __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
 }
 
@@ -3743,7 +3737,9 @@
 #endif
   if (FLAG_code_comments) {
     if (actual.is_reg()) {
-      Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+      Comment(";;; PrepareForTailCall, actual: %s {",
+              RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+                  actual.reg().code()));
     } else {
       Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
     }
@@ -3847,14 +3843,8 @@
   DCHECK(ToRegister(instr->result()).is(v0));
 
   __ li(a0, Operand(instr->arity()));
-  if (instr->arity() == 1) {
-    // We only need the allocation site for the case we have a length argument.
-    // The case may bail out to the runtime, which will determine the correct
-    // elements kind with the site.
-    __ li(a2, instr->hydrogen()->site());
-  } else {
-    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
-  }
+  __ li(a2, instr->hydrogen()->site());
+
   ElementsKind kind = instr->hydrogen()->elements_kind();
   AllocationSiteOverrideMode override_mode =
       (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@@ -3886,7 +3876,7 @@
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
     __ bind(&done);
   } else {
-    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    ArrayNArgumentsConstructorStub stub(isolate());
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
@@ -4017,14 +4007,12 @@
   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-  }
+  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
 
   __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
-  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
-                        isolate(), instr->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+  Handle<Code> ic =
+      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
+          .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -4267,13 +4255,11 @@
   DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-  }
+  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
 
   Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
-                        isolate(), instr->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+                        isolate(), instr->language_mode())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -4409,8 +4395,7 @@
     DCHECK(ToRegister(instr->context()).is(cp));
     PushSafepointRegistersScope scope(this);
     __ li(a1, Operand(to_map));
-    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
-    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
     __ CallStub(&stub);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
@@ -5230,14 +5215,6 @@
 }
 
 
-void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
-  Register hi_reg = ToRegister(instr->hi());
-  Register lo_reg = ToRegister(instr->lo());
-  DoubleRegister result_reg = ToDoubleRegister(instr->result());
-  __ Move(result_reg, lo_reg, hi_reg);
-}
-
-
 void LCodeGen::DoAllocate(LAllocate* instr) {
   class DeferredAllocate final : public LDeferredCode {
    public:
diff --git a/src/crankshaft/mips64/lithium-mips64.cc b/src/crankshaft/mips64/lithium-mips64.cc
index 3ee9ab6..b682d19 100644
--- a/src/crankshaft/mips64/lithium-mips64.cc
+++ b/src/crankshaft/mips64/lithium-mips64.cc
@@ -1074,6 +1074,10 @@
       return DoMathAbs(instr);
     case kMathLog:
       return DoMathLog(instr);
+    case kMathCos:
+      return DoMathCos(instr);
+    case kMathSin:
+      return DoMathSin(instr);
     case kMathExp:
       return DoMathExp(instr);
     case kMathSqrt:
@@ -1103,16 +1107,25 @@
   return DefineAsRegister(result);
 }
 
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseFixedDouble(instr->value(), f4);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), f4), instr);
+}
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseFixedDouble(instr->value(), f4);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), f4), instr);
+}
 
 LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
   DCHECK(instr->representation().IsDouble());
   DCHECK(instr->value()->representation().IsDouble());
-  LOperand* input = UseRegister(instr->value());
-  LOperand* temp1 = TempRegister();
-  LOperand* temp2 = TempRegister();
-  LOperand* double_temp = TempDoubleRegister();
-  LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
-  return DefineAsRegister(result);
+  LOperand* input = UseFixedDouble(instr->value(), f4);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), f4), instr);
 }
 
 
@@ -1924,13 +1937,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
-  LOperand* lo = UseRegister(instr->lo());
-  LOperand* hi = UseRegister(instr->hi());
-  return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
-}
-
-
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
   LOperand* context = info()->IsStub()
       ? UseFixed(instr->context(), cp)
@@ -1962,14 +1968,9 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* global_object =
-      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
-  LLoadGlobalGeneric* result =
-      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+
+  LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
   return MarkAsCall(DefineFixed(result, v0), instr);
 }
 
@@ -2013,10 +2014,7 @@
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
 
   LInstruction* result =
       DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), v0);
@@ -2089,10 +2087,7 @@
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
 
   LInstruction* result =
       DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
@@ -2155,12 +2150,8 @@
   DCHECK(instr->key()->representation().IsTagged());
   DCHECK(instr->value()->representation().IsTagged());
 
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
-    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
-  }
+  LOperand* slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+  LOperand* vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
 
   LStoreKeyedGeneric* result =
       new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
@@ -2249,12 +2240,8 @@
   LOperand* obj =
       UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
   LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
-    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
-  }
+  LOperand* slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+  LOperand* vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
 
   LStoreNamedGeneric* result =
       new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
diff --git a/src/crankshaft/mips64/lithium-mips64.h b/src/crankshaft/mips64/lithium-mips64.h
index 5d282ec..ad3fb87 100644
--- a/src/crankshaft/mips64/lithium-mips64.h
+++ b/src/crankshaft/mips64/lithium-mips64.h
@@ -55,7 +55,6 @@
   V(ConstantI)                               \
   V(ConstantS)                               \
   V(ConstantT)                               \
-  V(ConstructDouble)                         \
   V(Context)                                 \
   V(DebugBreak)                              \
   V(DeclareGlobals)                          \
@@ -99,6 +98,8 @@
   V(LoadNamedField)                          \
   V(LoadNamedGeneric)                        \
   V(MathAbs)                                 \
+  V(MathCos)                                 \
+  V(MathSin)                                 \
   V(MathExp)                                 \
   V(MathClz32)                               \
   V(MathFloor)                               \
@@ -900,24 +901,29 @@
   DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
 };
 
-
-class LMathExp final : public LTemplateInstruction<1, 1, 3> {
+class LMathCos final : public LTemplateInstruction<1, 1, 0> {
  public:
-  LMathExp(LOperand* value,
-           LOperand* double_temp,
-           LOperand* temp1,
-           LOperand* temp2) {
-    inputs_[0] = value;
-    temps_[0] = temp1;
-    temps_[1] = temp2;
-    temps_[2] = double_temp;
-    ExternalReference::InitializeMathExpData();
-  }
+  explicit LMathCos(LOperand* value) { inputs_[0] = value; }
 
   LOperand* value() { return inputs_[0]; }
-  LOperand* temp1() { return temps_[0]; }
-  LOperand* temp2() { return temps_[1]; }
-  LOperand* double_temp() { return temps_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+class LMathSin final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathSin(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+class LMathExp final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathExp(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
 };
@@ -1593,18 +1599,14 @@
   DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
 };
 
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
  public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
-                     LOperand* vector) {
+  LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
     inputs_[0] = context;
-    inputs_[1] = global_object;
     temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
-  LOperand* global_object() { return inputs_[1]; }
   LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
@@ -2345,20 +2347,6 @@
 };
 
 
-class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
- public:
-  LConstructDouble(LOperand* hi, LOperand* lo) {
-    inputs_[0] = hi;
-    inputs_[1] = lo;
-  }
-
-  LOperand* hi() { return inputs_[0]; }
-  LOperand* lo() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
-};
-
-
 class LAllocate final : public LTemplateInstruction<1, 2, 2> {
  public:
   LAllocate(LOperand* context,
@@ -2549,6 +2537,8 @@
   LInstruction* DoMathFround(HUnaryMathOperation* instr);
   LInstruction* DoMathAbs(HUnaryMathOperation* instr);
   LInstruction* DoMathLog(HUnaryMathOperation* instr);
+  LInstruction* DoMathCos(HUnaryMathOperation* instr);
+  LInstruction* DoMathSin(HUnaryMathOperation* instr);
   LInstruction* DoMathExp(HUnaryMathOperation* instr);
   LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
   LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
diff --git a/src/crankshaft/ppc/OWNERS b/src/crankshaft/ppc/OWNERS
index eb007cb..752e8e3 100644
--- a/src/crankshaft/ppc/OWNERS
+++ b/src/crankshaft/ppc/OWNERS
@@ -3,3 +3,4 @@
 joransiu@ca.ibm.com
 mbrandy@us.ibm.com
 michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/src/crankshaft/ppc/lithium-codegen-ppc.cc b/src/crankshaft/ppc/lithium-codegen-ppc.cc
index 31d9ebe..056c93a 100644
--- a/src/crankshaft/ppc/lithium-codegen-ppc.cc
+++ b/src/crankshaft/ppc/lithium-codegen-ppc.cc
@@ -11,7 +11,6 @@
 #include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
 
 namespace v8 {
 namespace internal {
@@ -766,7 +765,7 @@
                                             !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
-    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+    if (FLAG_trace_deopt || isolate()->is_profiling() ||
         jump_table_.is_empty() ||
         !table_entry.IsEquivalentTo(jump_table_.last())) {
       jump_table_.Add(table_entry, zone());
@@ -837,7 +836,6 @@
 void LCodeGen::RecordAndWritePosition(int position) {
   if (position == RelocInfo::kNoPosition) return;
   masm()->positions_recorder()->RecordPosition(position);
-  masm()->positions_recorder()->WriteRecordedPositions();
 }
 
 
@@ -1140,6 +1138,10 @@
     }
   }
 
+#if V8_TARGET_ARCH_PPC64
+  __ extsw(result, result);
+#endif
+
   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
     // Deoptimize if remainder is not 0.
     Register scratch = scratch0();
@@ -1333,6 +1335,9 @@
   // We performed a truncating division. Correct the result.
   __ subi(result, result, Operand(1));
   __ bind(&done);
+#if V8_TARGET_ARCH_PPC64
+  __ extsw(result, result);
+#endif
 }
 
 
@@ -2583,10 +2588,10 @@
   DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
   __ LoadP(object_prototype,
            FieldMemOperand(object_map, Map::kPrototypeOffset));
-  __ cmp(object_prototype, prototype);
-  EmitTrueBranch(instr, eq);
   __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
   EmitFalseBranch(instr, eq);
+  __ cmp(object_prototype, prototype);
+  EmitTrueBranch(instr, eq);
   __ LoadP(object_map,
            FieldMemOperand(object_prototype, HeapObject::kMapOffset));
   __ b(&loop);
@@ -2692,15 +2697,12 @@
 
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->global_object())
-             .is(LoadDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->result()).is(r3));
 
-  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
-                        isolate(), instr->typeof_mode(), PREMONOMORPHIC)
-                        .code();
+  Handle<Code> ic =
+      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
+          .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2815,10 +2817,7 @@
   // Name is always in r5.
   __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
-                        isolate(), NOT_INSIDE_TYPEOF,
-                        instr->hydrogen()->initialization_state())
-                        .code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3176,14 +3175,9 @@
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-  }
-
-  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
-                        isolate(), instr->hydrogen()->initialization_state())
-                        .code();
+  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3776,29 +3770,34 @@
   }
 }
 
-
-void LCodeGen::DoMathExp(LMathExp* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->value());
-  DoubleRegister result = ToDoubleRegister(instr->result());
-  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
-  DoubleRegister double_scratch2 = double_scratch0();
-  Register temp1 = ToRegister(instr->temp1());
-  Register temp2 = ToRegister(instr->temp2());
-
-  MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1,
-                                double_scratch2, temp1, temp2, scratch0());
+void LCodeGen::DoMathCos(LMathCos* instr) {
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
 }
 
+void LCodeGen::DoMathSin(LMathSin* instr) {
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
 
 void LCodeGen::DoMathLog(LMathLog* instr) {
   __ PrepareCallCFunction(0, 1, scratch0());
   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
-  __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0,
-                   1);
+  __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
 }
 
-
 void LCodeGen::DoMathClz32(LMathClz32* instr) {
   Register input = ToRegister(instr->value());
   Register result = ToRegister(instr->result());
@@ -3817,7 +3816,9 @@
 #endif
   if (FLAG_code_comments) {
     if (actual.is_reg()) {
-      Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+      Comment(";;; PrepareForTailCall, actual: %s {",
+              RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+                  actual.reg().code()));
     } else {
       Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
     }
@@ -3923,14 +3924,8 @@
   DCHECK(ToRegister(instr->result()).is(r3));
 
   __ mov(r3, Operand(instr->arity()));
-  if (instr->arity() == 1) {
-    // We only need the allocation site for the case we have a length argument.
-    // The case may bail out to the runtime, which will determine the correct
-    // elements kind with the site.
-    __ Move(r5, instr->hydrogen()->site());
-  } else {
-    __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
-  }
+  __ Move(r5, instr->hydrogen()->site());
+
   ElementsKind kind = instr->hydrogen()->elements_kind();
   AllocationSiteOverrideMode override_mode =
       (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@@ -3962,7 +3957,7 @@
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
     __ bind(&done);
   } else {
-    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    ArrayNArgumentsConstructorStub stub(isolate());
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
@@ -4097,14 +4092,12 @@
   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-  }
+  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
 
   __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
-  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
-                        isolate(), instr->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+  Handle<Code> ic =
+      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
+          .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -4359,13 +4352,11 @@
   DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-  }
+  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
 
   Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
-                        isolate(), instr->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+                        isolate(), instr->language_mode())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -4498,8 +4489,7 @@
     DCHECK(object_reg.is(r3));
     PushSafepointRegistersScope scope(this);
     __ Move(r4, to_map);
-    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
-    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
     __ CallStub(&stub);
     RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                  Safepoint::kLazyDeopt);
@@ -5317,18 +5307,6 @@
 }
 
 
-void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
-  Register hi_reg = ToRegister(instr->hi());
-  Register lo_reg = ToRegister(instr->lo());
-  DoubleRegister result_reg = ToDoubleRegister(instr->result());
-#if V8_TARGET_ARCH_PPC64
-  __ MovInt64ComponentsToDouble(result_reg, hi_reg, lo_reg, r0);
-#else
-  __ MovInt64ToDouble(result_reg, hi_reg, lo_reg);
-#endif
-}
-
-
 void LCodeGen::DoAllocate(LAllocate* instr) {
   class DeferredAllocate final : public LDeferredCode {
    public:
diff --git a/src/crankshaft/ppc/lithium-ppc.cc b/src/crankshaft/ppc/lithium-ppc.cc
index 0d9a617..81b2205 100644
--- a/src/crankshaft/ppc/lithium-ppc.cc
+++ b/src/crankshaft/ppc/lithium-ppc.cc
@@ -1080,6 +1080,10 @@
       return DoMathAbs(instr);
     case kMathLog:
       return DoMathLog(instr);
+    case kMathCos:
+      return DoMathCos(instr);
+    case kMathSin:
+      return DoMathSin(instr);
     case kMathExp:
       return DoMathExp(instr);
     case kMathSqrt:
@@ -1146,8 +1150,8 @@
 LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
   DCHECK(instr->representation().IsDouble());
   DCHECK(instr->value()->representation().IsDouble());
-  LOperand* input = UseFixedDouble(instr->value(), d1);
-  return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), d1), instr);
+  LOperand* input = UseFixedDouble(instr->value(), d0);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), d0), instr);
 }
 
 
@@ -1157,16 +1161,25 @@
   return DefineAsRegister(result);
 }
 
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseFixedDouble(instr->value(), d0);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), d0), instr);
+}
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseFixedDouble(instr->value(), d0);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), d0), instr);
+}
 
 LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
   DCHECK(instr->representation().IsDouble());
   DCHECK(instr->value()->representation().IsDouble());
-  LOperand* input = UseRegister(instr->value());
-  LOperand* temp1 = TempRegister();
-  LOperand* temp2 = TempRegister();
-  LOperand* double_temp = TempDoubleRegister();
-  LMathExp* result = new (zone()) LMathExp(input, double_temp, temp1, temp2);
-  return DefineAsRegister(result);
+  LOperand* input = UseFixedDouble(instr->value(), d0);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), d0), instr);
 }
 
 
@@ -1951,13 +1964,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
-  LOperand* lo = UseRegister(instr->lo());
-  LOperand* hi = UseRegister(instr->hi());
-  return DefineAsRegister(new (zone()) LConstructDouble(hi, lo));
-}
-
-
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
   LOperand* context = info()->IsStub() ? UseFixed(instr->context(), cp) : NULL;
   LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
@@ -1987,14 +1993,9 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* global_object =
-      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
-  LLoadGlobalGeneric* result =
-      new (zone()) LLoadGlobalGeneric(context, global_object, vector);
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+
+  LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
   return MarkAsCall(DefineFixed(result, r3), instr);
 }
 
@@ -2038,10 +2039,7 @@
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
 
   LInstruction* result =
       DefineFixed(new (zone()) LLoadNamedGeneric(context, object, vector), r3);
@@ -2111,10 +2109,7 @@
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
 
   LInstruction* result = DefineFixed(
       new (zone()) LLoadKeyedGeneric(context, object, key, vector), r3);
@@ -2173,12 +2168,8 @@
   DCHECK(instr->key()->representation().IsTagged());
   DCHECK(instr->value()->representation().IsTagged());
 
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
-    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
-  }
+  LOperand* slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+  LOperand* vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
 
   LStoreKeyedGeneric* result =
       new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
@@ -2266,13 +2257,8 @@
   LOperand* obj =
       UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
   LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
-    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
-  }
-
+  LOperand* slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+  LOperand* vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
   LStoreNamedGeneric* result =
       new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
   return MarkAsCall(result, instr);
diff --git a/src/crankshaft/ppc/lithium-ppc.h b/src/crankshaft/ppc/lithium-ppc.h
index f089b02..f5e11e3 100644
--- a/src/crankshaft/ppc/lithium-ppc.h
+++ b/src/crankshaft/ppc/lithium-ppc.h
@@ -53,7 +53,6 @@
   V(ConstantI)                               \
   V(ConstantS)                               \
   V(ConstantT)                               \
-  V(ConstructDouble)                         \
   V(Context)                                 \
   V(DebugBreak)                              \
   V(DeclareGlobals)                          \
@@ -98,6 +97,8 @@
   V(LoadNamedGeneric)                        \
   V(MathAbs)                                 \
   V(MathClz32)                               \
+  V(MathCos)                                 \
+  V(MathSin)                                 \
   V(MathExp)                                 \
   V(MathFloorD)                              \
   V(MathFloorI)                              \
@@ -901,22 +902,31 @@
   DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
 };
 
-
-class LMathExp final : public LTemplateInstruction<1, 1, 3> {
+class LMathCos final : public LTemplateInstruction<1, 1, 0> {
  public:
-  LMathExp(LOperand* value, LOperand* double_temp, LOperand* temp1,
-           LOperand* temp2) {
-    inputs_[0] = value;
-    temps_[0] = temp1;
-    temps_[1] = temp2;
-    temps_[2] = double_temp;
-    ExternalReference::InitializeMathExpData();
-  }
+  explicit LMathCos(LOperand* value) { inputs_[0] = value; }
 
   LOperand* value() { return inputs_[0]; }
-  LOperand* temp1() { return temps_[0]; }
-  LOperand* temp2() { return temps_[1]; }
-  LOperand* double_temp() { return temps_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+
+class LMathSin final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathSin(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+
+class LMathExp final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathExp(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
 };
@@ -1535,18 +1545,14 @@
   DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
 };
 
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
  public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
-                     LOperand* vector) {
+  LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
     inputs_[0] = context;
-    inputs_[1] = global_object;
     temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
-  LOperand* global_object() { return inputs_[1]; }
   LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
@@ -2268,20 +2274,6 @@
 };
 
 
-class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
- public:
-  LConstructDouble(LOperand* hi, LOperand* lo) {
-    inputs_[0] = hi;
-    inputs_[1] = lo;
-  }
-
-  LOperand* hi() { return inputs_[0]; }
-  LOperand* lo() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
-};
-
-
 class LAllocate final : public LTemplateInstruction<1, 2, 2> {
  public:
   LAllocate(LOperand* context, LOperand* size, LOperand* temp1,
@@ -2464,6 +2456,8 @@
   LInstruction* DoMathFround(HUnaryMathOperation* instr);
   LInstruction* DoMathAbs(HUnaryMathOperation* instr);
   LInstruction* DoMathLog(HUnaryMathOperation* instr);
+  LInstruction* DoMathCos(HUnaryMathOperation* instr);
+  LInstruction* DoMathSin(HUnaryMathOperation* instr);
   LInstruction* DoMathExp(HUnaryMathOperation* instr);
   LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
   LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
diff --git a/src/crankshaft/s390/OWNERS b/src/crankshaft/s390/OWNERS
index eb007cb..752e8e3 100644
--- a/src/crankshaft/s390/OWNERS
+++ b/src/crankshaft/s390/OWNERS
@@ -3,3 +3,4 @@
 joransiu@ca.ibm.com
 mbrandy@us.ibm.com
 michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/src/crankshaft/s390/lithium-codegen-s390.cc b/src/crankshaft/s390/lithium-codegen-s390.cc
index 38d1808..890545c 100644
--- a/src/crankshaft/s390/lithium-codegen-s390.cc
+++ b/src/crankshaft/s390/lithium-codegen-s390.cc
@@ -12,7 +12,6 @@
 #include "src/crankshaft/s390/lithium-gap-resolver-s390.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
 
 namespace v8 {
 namespace internal {
@@ -756,7 +755,7 @@
                                             !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
-    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+    if (FLAG_trace_deopt || isolate()->is_profiling() ||
         jump_table_.is_empty() ||
         !table_entry.IsEquivalentTo(jump_table_.last())) {
       jump_table_.Add(table_entry, zone());
@@ -820,7 +819,6 @@
 void LCodeGen::RecordAndWritePosition(int position) {
   if (position == RelocInfo::kNoPosition) return;
   masm()->positions_recorder()->RecordPosition(position);
-  masm()->positions_recorder()->WriteRecordedPositions();
 }
 
 static const char* LabelType(LLabel* label) {
@@ -2568,10 +2566,10 @@
   DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
   __ LoadP(object_prototype,
            FieldMemOperand(object_map, Map::kPrototypeOffset));
-  __ CmpP(object_prototype, prototype);
-  EmitTrueBranch(instr, eq);
   __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
   EmitFalseBranch(instr, eq);
+  __ CmpP(object_prototype, prototype);
+  EmitTrueBranch(instr, eq);
   __ LoadP(object_map,
            FieldMemOperand(object_prototype, HeapObject::kMapOffset));
   __ b(&loop);
@@ -2670,15 +2668,12 @@
 
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->global_object())
-             .is(LoadDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->result()).is(r2));
 
-  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
-                        isolate(), instr->typeof_mode(), PREMONOMORPHIC)
-                        .code();
+  Handle<Code> ic =
+      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
+          .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2779,10 +2774,7 @@
   // Name is always in r4.
   __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
-                        isolate(), NOT_INSIDE_TYPEOF,
-                        instr->hydrogen()->initialization_state())
-                        .code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2872,6 +2864,7 @@
   }
   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+  bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
   int base_offset = instr->base_offset();
   bool use_scratch = false;
 
@@ -2885,7 +2878,8 @@
         use_scratch = true;
       }
     } else {
-      __ IndexToArrayOffset(scratch0(), key, element_size_shift, key_is_smi);
+      __ IndexToArrayOffset(scratch0(), key, element_size_shift, key_is_smi,
+                            keyMaybeNegative);
       use_scratch = true;
     }
     if (elements_kind == FLOAT32_ELEMENTS) {
@@ -2905,7 +2899,8 @@
     Register result = ToRegister(instr->result());
     MemOperand mem_operand =
         PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
-                            constant_key, element_size_shift, base_offset);
+                            constant_key, element_size_shift, base_offset,
+                            keyMaybeNegative);
     switch (elements_kind) {
       case INT8_ELEMENTS:
         __ LoadB(result, mem_operand);
@@ -2959,6 +2954,7 @@
 
   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+  bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
   int constant_key = 0;
   if (key_is_constant) {
     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
@@ -2973,7 +2969,8 @@
   intptr_t base_offset = instr->base_offset() + constant_key * kDoubleSize;
   if (!key_is_constant) {
     use_scratch = true;
-    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi,
+                          keyMaybeNegative);
   }
 
   // Memory references support up to 20-bits signed displacement in RXY form
@@ -3095,7 +3092,8 @@
                                          bool key_is_constant, bool key_is_smi,
                                          int constant_key,
                                          int element_size_shift,
-                                         int base_offset) {
+                                         int base_offset,
+                                         bool keyMaybeNegative) {
   Register scratch = scratch0();
 
   if (key_is_constant) {
@@ -3113,7 +3111,8 @@
       (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
 
   if (needs_shift) {
-    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi,
+                          keyMaybeNegative);
   } else {
     scratch = key;
   }
@@ -3129,14 +3128,9 @@
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-  }
-
-  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
-                        isolate(), instr->hydrogen()->initialization_state())
-                        .code();
+  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3671,23 +3665,31 @@
   }
 }
 
-void LCodeGen::DoMathExp(LMathExp* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->value());
-  DoubleRegister result = ToDoubleRegister(instr->result());
-  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
-  DoubleRegister double_scratch2 = double_scratch0();
-  Register temp1 = ToRegister(instr->temp1());
-  Register temp2 = ToRegister(instr->temp2());
+void LCodeGen::DoMathCos(LMathCos* instr) {
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
 
-  MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1,
-                                double_scratch2, temp1, temp2, scratch0());
+void LCodeGen::DoMathSin(LMathSin* instr) {
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
 }
 
 void LCodeGen::DoMathLog(LMathLog* instr) {
   __ PrepareCallCFunction(0, 1, scratch0());
   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
-  __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0,
-                   1);
+  __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
 }
 
@@ -3716,7 +3718,9 @@
 #endif
   if (FLAG_code_comments) {
     if (actual.is_reg()) {
-      Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+      Comment(";;; PrepareForTailCall, actual: %s {",
+              RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+                  actual.reg().code()));
     } else {
       Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
     }
@@ -3820,14 +3824,8 @@
   DCHECK(ToRegister(instr->result()).is(r2));
 
   __ mov(r2, Operand(instr->arity()));
-  if (instr->arity() == 1) {
-    // We only need the allocation site for the case we have a length argument.
-    // The case may bail out to the runtime, which will determine the correct
-    // elements kind with the site.
-    __ Move(r4, instr->hydrogen()->site());
-  } else {
-    __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
-  }
+  __ Move(r4, instr->hydrogen()->site());
+
   ElementsKind kind = instr->hydrogen()->elements_kind();
   AllocationSiteOverrideMode override_mode =
       (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@@ -3859,7 +3857,7 @@
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
     __ bind(&done);
   } else {
-    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    ArrayNArgumentsConstructorStub stub(isolate());
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
@@ -3990,15 +3988,12 @@
   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-  }
+  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
 
   __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
-  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
-                        isolate(), instr->language_mode(),
-                        instr->hydrogen()->initialization_state())
-                        .code();
+  Handle<Code> ic =
+      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
+          .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -4060,6 +4055,7 @@
   }
   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+  bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
   int base_offset = instr->base_offset();
 
   if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
@@ -4079,7 +4075,8 @@
         address = external_pointer;
       }
     } else {
-      __ IndexToArrayOffset(address, key, element_size_shift, key_is_smi);
+      __ IndexToArrayOffset(address, key, element_size_shift, key_is_smi,
+                            keyMaybeNegative);
       __ AddP(address, external_pointer);
     }
     if (elements_kind == FLOAT32_ELEMENTS) {
@@ -4092,7 +4089,8 @@
     Register value(ToRegister(instr->value()));
     MemOperand mem_operand =
         PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
-                            constant_key, element_size_shift, base_offset);
+                            constant_key, element_size_shift, base_offset,
+                            keyMaybeNegative);
     switch (elements_kind) {
       case UINT8_ELEMENTS:
       case UINT8_CLAMPED_ELEMENTS:
@@ -4160,6 +4158,7 @@
   }
   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+  bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
   int base_offset = instr->base_offset() + constant_key * kDoubleSize;
   bool use_scratch = false;
   intptr_t address_offset = base_offset;
@@ -4173,7 +4172,8 @@
     }
   } else {
     use_scratch = true;
-    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi,
+                          keyMaybeNegative);
     // Memory references support up to 20-bits signed displacement in RXY form
     if (!is_int20((address_offset))) {
       __ AddP(scratch, Operand(address_offset));
@@ -4291,13 +4291,10 @@
   DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-  }
+  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
 
   Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
-                        isolate(), instr->language_mode(),
-                        instr->hydrogen()->initialization_state())
+                        isolate(), instr->language_mode())
                         .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
@@ -4427,8 +4424,7 @@
     DCHECK(object_reg.is(r2));
     PushSafepointRegistersScope scope(this);
     __ Move(r3, to_map);
-    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
-    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
     __ CallStub(&stub);
     RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                  Safepoint::kLazyDeopt);
@@ -5207,20 +5203,6 @@
   }
 }
 
-void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
-  Register hi_reg = ToRegister(instr->hi());
-  Register lo_reg = ToRegister(instr->lo());
-  DoubleRegister result_reg = ToDoubleRegister(instr->result());
-  Register scratch = scratch0();
-
-  // Combine hi_reg:lo_reg into a single 64-bit register.
-  __ sllg(scratch, hi_reg, Operand(32));
-  __ lr(scratch, lo_reg);
-
-  // Bitwise convert from GPR to FPR
-  __ ldgr(result_reg, scratch);
-}
-
 void LCodeGen::DoAllocate(LAllocate* instr) {
   class DeferredAllocate final : public LDeferredCode {
    public:
diff --git a/src/crankshaft/s390/lithium-codegen-s390.h b/src/crankshaft/s390/lithium-codegen-s390.h
index 7721b30..6abd4fa 100644
--- a/src/crankshaft/s390/lithium-codegen-s390.h
+++ b/src/crankshaft/s390/lithium-codegen-s390.h
@@ -116,7 +116,8 @@
   MemOperand PrepareKeyedOperand(Register key, Register base,
                                  bool key_is_constant, bool key_is_tagged,
                                  int constant_key, int element_size_shift,
-                                 int base_offset);
+                                 int base_offset,
+                                 bool keyMaybeNegative = true);
 
   // Emit frame translation commands for an environment.
   void WriteTranslation(LEnvironment* environment, Translation* translation);
diff --git a/src/crankshaft/s390/lithium-s390.cc b/src/crankshaft/s390/lithium-s390.cc
index fbc1970..033484c 100644
--- a/src/crankshaft/s390/lithium-s390.cc
+++ b/src/crankshaft/s390/lithium-s390.cc
@@ -985,6 +985,10 @@
       return DoMathAbs(instr);
     case kMathLog:
       return DoMathLog(instr);
+    case kMathCos:
+      return DoMathCos(instr);
+    case kMathSin:
+      return DoMathSin(instr);
     case kMathExp:
       return DoMathExp(instr);
     case kMathSqrt:
@@ -1034,8 +1038,8 @@
 LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
   DCHECK(instr->representation().IsDouble());
   DCHECK(instr->value()->representation().IsDouble());
-  LOperand* input = UseFixedDouble(instr->value(), d1);
-  return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), d1), instr);
+  LOperand* input = UseFixedDouble(instr->value(), d0);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), d0), instr);
 }
 
 LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
@@ -1044,15 +1048,25 @@
   return DefineAsRegister(result);
 }
 
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseFixedDouble(instr->value(), d0);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), d0), instr);
+}
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseFixedDouble(instr->value(), d0);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), d0), instr);
+}
+
 LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
   DCHECK(instr->representation().IsDouble());
   DCHECK(instr->value()->representation().IsDouble());
-  LOperand* input = UseRegister(instr->value());
-  LOperand* temp1 = TempRegister();
-  LOperand* temp2 = TempRegister();
-  LOperand* double_temp = TempDoubleRegister();
-  LMathExp* result = new (zone()) LMathExp(input, double_temp, temp1, temp2);
-  return DefineAsRegister(result);
+  LOperand* input = UseFixedDouble(instr->value(), d0);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), d0), instr);
 }
 
 LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
@@ -1779,12 +1793,6 @@
   return DefineAsRegister(new (zone()) LDoubleBits(UseRegister(value)));
 }
 
-LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
-  LOperand* lo = UseRegister(instr->lo());
-  LOperand* hi = UseRegister(instr->hi());
-  return DefineAsRegister(new (zone()) LConstructDouble(hi, lo));
-}
-
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
   LOperand* context = info()->IsStub() ? UseFixed(instr->context(), cp) : NULL;
   LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
@@ -1812,14 +1820,9 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* global_object =
-      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
-  LLoadGlobalGeneric* result =
-      new (zone()) LLoadGlobalGeneric(context, global_object, vector);
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+
+  LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
   return MarkAsCall(DefineFixed(result, r2), instr);
 }
 
@@ -1859,10 +1862,7 @@
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
 
   LInstruction* result =
       DefineFixed(new (zone()) LLoadNamedGeneric(context, object, vector), r2);
@@ -1928,10 +1928,7 @@
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
 
   LInstruction* result = DefineFixed(
       new (zone()) LLoadKeyedGeneric(context, object, key, vector), r2);
@@ -1988,12 +1985,8 @@
   DCHECK(instr->key()->representation().IsTagged());
   DCHECK(instr->value()->representation().IsTagged());
 
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
-    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
-  }
+  LOperand* slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+  LOperand* vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
 
   LStoreKeyedGeneric* result =
       new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
@@ -2076,13 +2069,8 @@
   LOperand* obj =
       UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
   LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
-    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
-  }
-
+  LOperand* slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+  LOperand* vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
   LStoreNamedGeneric* result =
       new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
   return MarkAsCall(result, instr);
diff --git a/src/crankshaft/s390/lithium-s390.h b/src/crankshaft/s390/lithium-s390.h
index 407d45d..283629f 100644
--- a/src/crankshaft/s390/lithium-s390.h
+++ b/src/crankshaft/s390/lithium-s390.h
@@ -53,7 +53,6 @@
   V(ConstantI)                               \
   V(ConstantS)                               \
   V(ConstantT)                               \
-  V(ConstructDouble)                         \
   V(Context)                                 \
   V(DebugBreak)                              \
   V(DeclareGlobals)                          \
@@ -98,6 +97,8 @@
   V(LoadNamedGeneric)                        \
   V(MathAbs)                                 \
   V(MathClz32)                               \
+  V(MathCos)                                 \
+  V(MathSin)                                 \
   V(MathExp)                                 \
   V(MathFloor)                               \
   V(MathFround)                              \
@@ -836,21 +837,29 @@
   DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
 };
 
-class LMathExp final : public LTemplateInstruction<1, 1, 3> {
+class LMathCos final : public LTemplateInstruction<1, 1, 0> {
  public:
-  LMathExp(LOperand* value, LOperand* double_temp, LOperand* temp1,
-           LOperand* temp2) {
-    inputs_[0] = value;
-    temps_[0] = temp1;
-    temps_[1] = temp2;
-    temps_[2] = double_temp;
-    ExternalReference::InitializeMathExpData();
-  }
+  explicit LMathCos(LOperand* value) { inputs_[0] = value; }
 
   LOperand* value() { return inputs_[0]; }
-  LOperand* temp1() { return temps_[0]; }
-  LOperand* temp2() { return temps_[1]; }
-  LOperand* double_temp() { return temps_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+class LMathSin final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathSin(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+class LMathExp final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathExp(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
 };
@@ -1430,17 +1439,14 @@
   DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
 };
 
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
  public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
-                     LOperand* vector) {
+  LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
     inputs_[0] = context;
-    inputs_[1] = global_object;
     temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
-  LOperand* global_object() { return inputs_[1]; }
   LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
@@ -2118,19 +2124,6 @@
   DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
 };
 
-class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
- public:
-  LConstructDouble(LOperand* hi, LOperand* lo) {
-    inputs_[0] = hi;
-    inputs_[1] = lo;
-  }
-
-  LOperand* hi() { return inputs_[0]; }
-  LOperand* lo() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
-};
-
 class LAllocate final : public LTemplateInstruction<1, 2, 2> {
  public:
   LAllocate(LOperand* context, LOperand* size, LOperand* temp1,
@@ -2303,6 +2296,8 @@
   LInstruction* DoMathFround(HUnaryMathOperation* instr);
   LInstruction* DoMathAbs(HUnaryMathOperation* instr);
   LInstruction* DoMathLog(HUnaryMathOperation* instr);
+  LInstruction* DoMathCos(HUnaryMathOperation* instr);
+  LInstruction* DoMathSin(HUnaryMathOperation* instr);
   LInstruction* DoMathExp(HUnaryMathOperation* instr);
   LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
   LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
diff --git a/src/crankshaft/typing.cc b/src/crankshaft/typing.cc
index 9bd09ac..013f50f 100644
--- a/src/crankshaft/typing.cc
+++ b/src/crankshaft/typing.cc
@@ -24,7 +24,7 @@
       osr_ast_id_(osr_ast_id),
       root_(root),
       oracle_(isolate, zone, handle(closure->shared()->code()),
-              handle(closure->shared()->feedback_vector()),
+              handle(closure->feedback_vector()),
               handle(closure->context()->native_context())),
       store_(zone),
       bounds_(bounds) {
diff --git a/src/crankshaft/x64/lithium-codegen-x64.cc b/src/crankshaft/x64/lithium-codegen-x64.cc
index 350543e..c82f6c1 100644
--- a/src/crankshaft/x64/lithium-codegen-x64.cc
+++ b/src/crankshaft/x64/lithium-codegen-x64.cc
@@ -12,7 +12,6 @@
 #include "src/crankshaft/hydrogen-osr.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
 
 namespace v8 {
 namespace internal {
@@ -760,7 +759,7 @@
                                             !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
-    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+    if (FLAG_trace_deopt || isolate()->is_profiling() ||
         jump_table_.is_empty() ||
         !table_entry.IsEquivalentTo(jump_table_.last())) {
       jump_table_.Add(table_entry, zone());
@@ -839,7 +838,6 @@
 void LCodeGen::RecordAndWritePosition(int position) {
   if (position == RelocInfo::kNoPosition) return;
   masm()->positions_recorder()->RecordPosition(position);
-  masm()->positions_recorder()->WriteRecordedPositions();
 }
 
 
@@ -1779,7 +1777,7 @@
           : SmiValuesAre31Bits());
       __ cmpl(left_reg, right_imm);
       __ j(condition, &return_left, Label::kNear);
-      __ movp(left_reg, right_imm);
+      __ movl(left_reg, right_imm);
     } else if (right->IsRegister()) {
       Register right_reg = ToRegister(right);
       if (instr->hydrogen_value()->representation().IsSmi()) {
@@ -1887,13 +1885,12 @@
       __ Movapd(result, result);
       break;
     case Token::MOD: {
-      XMMRegister xmm_scratch = double_scratch0();
-      __ PrepareCallCFunction(2);
-      __ Movapd(xmm_scratch, left);
+      DCHECK(left.is(xmm0));
       DCHECK(right.is(xmm1));
+      DCHECK(result.is(xmm0));
+      __ PrepareCallCFunction(2);
       __ CallCFunction(
           ExternalReference::mod_two_doubles_operation(isolate()), 2);
-      __ Movapd(result, xmm_scratch);
       break;
     }
     default:
@@ -2458,7 +2455,6 @@
   Label loop;
   __ bind(&loop);
 
-
   // Deoptimize if the object needs to be access checked.
   __ testb(FieldOperand(object_map, Map::kBitFieldOffset),
            Immediate(1 << Map::kIsAccessCheckNeeded));
@@ -2468,10 +2464,10 @@
   DeoptimizeIf(equal, instr, Deoptimizer::kProxy);
 
   __ movp(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
-  __ cmpp(object_prototype, prototype);
-  EmitTrueBranch(instr, equal);
   __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
   EmitFalseBranch(instr, equal);
+  __ cmpp(object_prototype, prototype);
+  EmitTrueBranch(instr, equal);
   __ movp(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
   __ jmp(&loop);
 }
@@ -2563,15 +2559,12 @@
 
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->context()).is(rsi));
-  DCHECK(ToRegister(instr->global_object())
-             .is(LoadDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->result()).is(rax));
 
-  __ Move(LoadDescriptor::NameRegister(), instr->name());
   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
-                        isolate(), instr->typeof_mode(), PREMONOMORPHIC)
-                        .code();
+  Handle<Code> ic =
+      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
+          .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2686,10 +2679,7 @@
 
   __ Move(LoadDescriptor::NameRegister(), instr->name());
   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
-                        isolate(), NOT_INSIDE_TYPEOF,
-                        instr->hydrogen()->initialization_state())
-                        .code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2973,13 +2963,9 @@
   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-  }
+  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
 
-  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
-                        isolate(), instr->hydrogen()->initialization_state())
-                        .code();
+  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3608,45 +3594,32 @@
   }
 }
 
-
-void LCodeGen::DoMathExp(LMathExp* instr) {
-  XMMRegister input = ToDoubleRegister(instr->value());
-  XMMRegister result = ToDoubleRegister(instr->result());
-  XMMRegister temp0 = double_scratch0();
-  Register temp1 = ToRegister(instr->temp1());
-  Register temp2 = ToRegister(instr->temp2());
-
-  MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
+void LCodeGen::DoMathCos(LMathCos* instr) {
+  DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
+  DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
+  __ PrepareCallCFunction(1);
+  __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 1);
 }
 
+void LCodeGen::DoMathExp(LMathExp* instr) {
+  DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
+  DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
+  __ PrepareCallCFunction(1);
+  __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 1);
+}
+
+void LCodeGen::DoMathSin(LMathSin* instr) {
+  DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
+  DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
+  __ PrepareCallCFunction(1);
+  __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 1);
+}
 
 void LCodeGen::DoMathLog(LMathLog* instr) {
-  DCHECK(instr->value()->Equals(instr->result()));
-  XMMRegister input_reg = ToDoubleRegister(instr->value());
-  XMMRegister xmm_scratch = double_scratch0();
-  Label positive, done, zero;
-  __ Xorpd(xmm_scratch, xmm_scratch);
-  __ Ucomisd(input_reg, xmm_scratch);
-  __ j(above, &positive, Label::kNear);
-  __ j(not_carry, &zero, Label::kNear);
-  __ Pcmpeqd(input_reg, input_reg);
-  __ jmp(&done, Label::kNear);
-  __ bind(&zero);
-  ExternalReference ninf =
-      ExternalReference::address_of_negative_infinity();
-  Operand ninf_operand = masm()->ExternalOperand(ninf);
-  __ Movsd(input_reg, ninf_operand);
-  __ jmp(&done, Label::kNear);
-  __ bind(&positive);
-  __ fldln2();
-  __ subp(rsp, Immediate(kDoubleSize));
-  __ Movsd(Operand(rsp, 0), input_reg);
-  __ fld_d(Operand(rsp, 0));
-  __ fyl2x();
-  __ fstp_d(Operand(rsp, 0));
-  __ Movsd(input_reg, Operand(rsp, 0));
-  __ addp(rsp, Immediate(kDoubleSize));
-  __ bind(&done);
+  DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
+  DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
+  __ PrepareCallCFunction(1);
+  __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 1);
 }
 
 
@@ -3669,7 +3642,9 @@
 #endif
   if (FLAG_code_comments) {
     if (actual.is_reg()) {
-      Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+      Comment(";;; PrepareForTailCall, actual: %s {",
+              RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+                  actual.reg().code()));
     } else {
       Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
     }
@@ -3738,14 +3713,7 @@
   DCHECK(ToRegister(instr->result()).is(rax));
 
   __ Set(rax, instr->arity());
-  if (instr->arity() == 1) {
-    // We only need the allocation site for the case we have a length argument.
-    // The case may bail out to the runtime, which will determine the correct
-    // elements kind with the site.
-    __ Move(rbx, instr->hydrogen()->site());
-  } else {
-    __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
-  }
+  __ Move(rbx, instr->hydrogen()->site());
 
   ElementsKind kind = instr->hydrogen()->elements_kind();
   AllocationSiteOverrideMode override_mode =
@@ -3779,7 +3747,7 @@
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
     __ bind(&done);
   } else {
-    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    ArrayNArgumentsConstructorStub stub(isolate());
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
@@ -3942,14 +3910,12 @@
   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-  }
+  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
 
   __ Move(StoreDescriptor::NameRegister(), instr->hydrogen()->name());
-  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
-                        isolate(), instr->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+  Handle<Code> ic =
+      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
+          .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -4201,13 +4167,11 @@
   DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-  }
+  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
 
   Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
-                        isolate(), instr->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+                        isolate(), instr->language_mode())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -4338,8 +4302,7 @@
     DCHECK(ToRegister(instr->context()).is(rsi));
     PushSafepointRegistersScope scope(this);
     __ Move(rbx, to_map);
-    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
-    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
     __ CallStub(&stub);
     RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
   }
@@ -4789,20 +4752,21 @@
     __ Set(input_reg, 0);
   } else {
     XMMRegister scratch = ToDoubleRegister(instr->temp());
-    DCHECK(!scratch.is(xmm0));
+    DCHECK(!scratch.is(double_scratch0()));
     __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                    Heap::kHeapNumberMapRootIndex);
     DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
-    __ Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
-    __ Cvttsd2si(input_reg, xmm0);
+    __ Movsd(double_scratch0(),
+             FieldOperand(input_reg, HeapNumber::kValueOffset));
+    __ Cvttsd2si(input_reg, double_scratch0());
     __ Cvtlsi2sd(scratch, input_reg);
-    __ Ucomisd(xmm0, scratch);
+    __ Ucomisd(double_scratch0(), scratch);
     DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
     DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
     if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
       __ testl(input_reg, input_reg);
       __ j(not_zero, done);
-      __ Movmskpd(input_reg, xmm0);
+      __ Movmskpd(input_reg, double_scratch0());
       __ andl(input_reg, Immediate(1));
       DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
     }
@@ -5130,17 +5094,6 @@
 }
 
 
-void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
-  Register hi_reg = ToRegister(instr->hi());
-  Register lo_reg = ToRegister(instr->lo());
-  XMMRegister result_reg = ToDoubleRegister(instr->result());
-  __ movl(kScratchRegister, hi_reg);
-  __ shlq(kScratchRegister, Immediate(32));
-  __ orq(kScratchRegister, lo_reg);
-  __ Movq(result_reg, kScratchRegister);
-}
-
-
 void LCodeGen::DoAllocate(LAllocate* instr) {
   class DeferredAllocate final : public LDeferredCode {
    public:
diff --git a/src/crankshaft/x64/lithium-codegen-x64.h b/src/crankshaft/x64/lithium-codegen-x64.h
index f643e2b..c586ef5 100644
--- a/src/crankshaft/x64/lithium-codegen-x64.h
+++ b/src/crankshaft/x64/lithium-codegen-x64.h
@@ -115,7 +115,7 @@
   Scope* scope() const { return scope_; }
   HGraph* graph() const { return chunk()->graph(); }
 
-  XMMRegister double_scratch0() const { return xmm0; }
+  XMMRegister double_scratch0() const { return kScratchDoubleReg; }
 
   void EmitClassOfTest(Label* if_true,
                        Label* if_false,
diff --git a/src/crankshaft/x64/lithium-gap-resolver-x64.cc b/src/crankshaft/x64/lithium-gap-resolver-x64.cc
index 3808c37..94dffb3 100644
--- a/src/crankshaft/x64/lithium-gap-resolver-x64.cc
+++ b/src/crankshaft/x64/lithium-gap-resolver-x64.cc
@@ -223,8 +223,8 @@
       __ Movsd(cgen_->ToDoubleRegister(destination), src);
     } else {
       DCHECK(destination->IsDoubleStackSlot());
-      __ Movsd(xmm0, src);
-      __ Movsd(cgen_->ToOperand(destination), xmm0);
+      __ Movsd(kScratchDoubleReg, src);
+      __ Movsd(cgen_->ToOperand(destination), kScratchDoubleReg);
     }
   } else {
     UNREACHABLE();
@@ -264,18 +264,18 @@
     // Swap two stack slots or two double stack slots.
     Operand src = cgen_->ToOperand(source);
     Operand dst = cgen_->ToOperand(destination);
-    __ Movsd(xmm0, src);
+    __ Movsd(kScratchDoubleReg, src);
     __ movp(kScratchRegister, dst);
-    __ Movsd(dst, xmm0);
+    __ Movsd(dst, kScratchDoubleReg);
     __ movp(src, kScratchRegister);
 
   } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
     // Swap two double registers.
     XMMRegister source_reg = cgen_->ToDoubleRegister(source);
     XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
-    __ Movapd(xmm0, source_reg);
+    __ Movapd(kScratchDoubleReg, source_reg);
     __ Movapd(source_reg, destination_reg);
-    __ Movapd(destination_reg, xmm0);
+    __ Movapd(destination_reg, kScratchDoubleReg);
 
   } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
     // Swap a double register and a double stack slot.
@@ -287,9 +287,9 @@
     LOperand* other = source->IsDoubleRegister() ? destination : source;
     DCHECK(other->IsDoubleStackSlot());
     Operand other_operand = cgen_->ToOperand(other);
-    __ Movapd(xmm0, reg);
+    __ Movapd(kScratchDoubleReg, reg);
     __ Movsd(reg, other_operand);
-    __ Movsd(other_operand, xmm0);
+    __ Movsd(other_operand, kScratchDoubleReg);
 
   } else {
     // No other combinations are possible.
diff --git a/src/crankshaft/x64/lithium-x64.cc b/src/crankshaft/x64/lithium-x64.cc
index daedd72..01b9918 100644
--- a/src/crankshaft/x64/lithium-x64.cc
+++ b/src/crankshaft/x64/lithium-x64.cc
@@ -714,10 +714,10 @@
   DCHECK(instr->left()->representation().IsDouble());
   DCHECK(instr->right()->representation().IsDouble());
   if (op == Token::MOD) {
-    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* left = UseFixedDouble(instr->BetterLeftOperand(), xmm0);
     LOperand* right = UseFixedDouble(instr->BetterRightOperand(), xmm1);
     LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
-    return MarkAsCall(DefineSameAsFirst(result), instr);
+    return MarkAsCall(DefineFixedDouble(result, xmm0), instr);
   } else {
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
@@ -1090,10 +1090,14 @@
       return DoMathFround(instr);
     case kMathAbs:
       return DoMathAbs(instr);
+    case kMathCos:
+      return DoMathCos(instr);
     case kMathLog:
       return DoMathLog(instr);
     case kMathExp:
       return DoMathExp(instr);
+    case kMathSin:
+      return DoMathSin(instr);
     case kMathSqrt:
       return DoMathSqrt(instr);
     case kMathPowHalf:
@@ -1155,8 +1159,9 @@
 LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
   DCHECK(instr->representation().IsDouble());
   DCHECK(instr->value()->representation().IsDouble());
-  LOperand* input = UseRegisterAtStart(instr->value());
-  return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr);
+  LOperand* input = UseFixedDouble(instr->value(), xmm0);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), xmm0),
+                    instr);
 }
 
 
@@ -1166,17 +1171,29 @@
   return DefineAsRegister(result);
 }
 
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseFixedDouble(instr->value(), xmm0);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), xmm0),
+                    instr);
+}
 
 LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
   DCHECK(instr->representation().IsDouble());
   DCHECK(instr->value()->representation().IsDouble());
-  LOperand* value = UseTempRegister(instr->value());
-  LOperand* temp1 = TempRegister();
-  LOperand* temp2 = TempRegister();
-  LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
-  return DefineAsRegister(result);
+  LOperand* input = UseFixedDouble(instr->value(), xmm0);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), xmm0),
+                    instr);
 }
 
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseFixedDouble(instr->value(), xmm0);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), xmm0),
+                    instr);
+}
 
 LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
   LOperand* input = UseAtStart(instr->value());
@@ -1957,13 +1974,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
-  LOperand* lo = UseRegister(instr->lo());
-  LOperand* hi = UseRegister(instr->hi());
-  return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
-}
-
-
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
   LOperand* context = info()->IsStub() ? UseFixed(instr->context(), rsi) : NULL;
   LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
@@ -1993,15 +2003,9 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), rsi);
-  LOperand* global_object =
-      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
 
-  LLoadGlobalGeneric* result =
-      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+  LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -2058,10 +2062,7 @@
   LOperand* context = UseFixed(instr->context(), rsi);
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
   LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
       context, object, vector);
   return MarkAsCall(DefineFixed(result, rax), instr);
@@ -2159,10 +2160,7 @@
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
 
   LLoadKeyedGeneric* result =
       new(zone()) LLoadKeyedGeneric(context, object, key, vector);
@@ -2243,12 +2241,8 @@
   DCHECK(instr->key()->representation().IsTagged());
   DCHECK(instr->value()->representation().IsTagged());
 
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
-    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
-  }
+  LOperand* slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+  LOperand* vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
 
   LStoreKeyedGeneric* result = new (zone())
       LStoreKeyedGeneric(context, object, key, value, slot, vector);
@@ -2355,12 +2349,8 @@
   LOperand* object =
       UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
   LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
-    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
-  }
+  LOperand* slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+  LOperand* vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
 
   LStoreNamedGeneric* result =
       new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
diff --git a/src/crankshaft/x64/lithium-x64.h b/src/crankshaft/x64/lithium-x64.h
index 91f5541..623421c 100644
--- a/src/crankshaft/x64/lithium-x64.h
+++ b/src/crankshaft/x64/lithium-x64.h
@@ -53,7 +53,6 @@
   V(ConstantI)                               \
   V(ConstantS)                               \
   V(ConstantT)                               \
-  V(ConstructDouble)                         \
   V(Context)                                 \
   V(DebugBreak)                              \
   V(DeclareGlobals)                          \
@@ -98,6 +97,7 @@
   V(LoadNamedGeneric)                        \
   V(MathAbs)                                 \
   V(MathClz32)                               \
+  V(MathCos)                                 \
   V(MathExp)                                 \
   V(MathFloorD)                              \
   V(MathFloorI)                              \
@@ -107,6 +107,7 @@
   V(MathPowHalf)                             \
   V(MathRoundD)                              \
   V(MathRoundI)                              \
+  V(MathSin)                                 \
   V(MathSqrt)                                \
   V(MaybeGrowElements)                       \
   V(ModByConstI)                             \
@@ -909,23 +910,32 @@
   DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
 };
 
-
-class LMathExp final : public LTemplateInstruction<1, 1, 2> {
+class LMathCos final : public LTemplateInstruction<1, 1, 0> {
  public:
-  LMathExp(LOperand* value, LOperand* temp1, LOperand* temp2) {
-    inputs_[0] = value;
-    temps_[0] = temp1;
-    temps_[1] = temp2;
-    ExternalReference::InitializeMathExpData();
-  }
+  explicit LMathCos(LOperand* value) { inputs_[0] = value; }
 
   LOperand* value() { return inputs_[0]; }
-  LOperand* temp1() { return temps_[0]; }
-  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+class LMathExp final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathExp(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
 };
 
+class LMathSin final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathSin(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
 
 class LMathSqrt final : public LTemplateInstruction<1, 1, 0> {
  public:
@@ -1581,13 +1591,10 @@
   LOperand* temp_vector() { return temps_[0]; }
 };
 
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
  public:
-  explicit LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
-                              LOperand* vector) {
+  explicit LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
     inputs_[0] = context;
-    inputs_[1] = global_object;
     temps_[0] = vector;
   }
 
@@ -1595,7 +1602,6 @@
   DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
 
   LOperand* context() { return inputs_[0]; }
-  LOperand* global_object() { return inputs_[1]; }
   LOperand* temp_vector() { return temps_[0]; }
 
   Handle<Object> name() const { return hydrogen()->name(); }
@@ -2339,20 +2345,6 @@
 };
 
 
-class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
- public:
-  LConstructDouble(LOperand* hi, LOperand* lo) {
-    inputs_[0] = hi;
-    inputs_[1] = lo;
-  }
-
-  LOperand* hi() { return inputs_[0]; }
-  LOperand* lo() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
-};
-
-
 class LAllocate final : public LTemplateInstruction<1, 2, 1> {
  public:
   LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
@@ -2539,8 +2531,10 @@
   LInstruction* DoMathRound(HUnaryMathOperation* instr);
   LInstruction* DoMathFround(HUnaryMathOperation* instr);
   LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+  LInstruction* DoMathCos(HUnaryMathOperation* instr);
   LInstruction* DoMathLog(HUnaryMathOperation* instr);
   LInstruction* DoMathExp(HUnaryMathOperation* instr);
+  LInstruction* DoMathSin(HUnaryMathOperation* instr);
   LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
   LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
   LInstruction* DoMathClz32(HUnaryMathOperation* instr);
diff --git a/src/crankshaft/x87/lithium-codegen-x87.cc b/src/crankshaft/x87/lithium-codegen-x87.cc
index 641a87a..7bf974c 100644
--- a/src/crankshaft/x87/lithium-codegen-x87.cc
+++ b/src/crankshaft/x87/lithium-codegen-x87.cc
@@ -14,7 +14,6 @@
 #include "src/deoptimizer.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/x87/frames-x87.h"
 
 namespace v8 {
@@ -1025,7 +1024,7 @@
                                             !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
-    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+    if (FLAG_trace_deopt || isolate()->is_profiling() ||
         jump_table_.is_empty() ||
         !table_entry.IsEquivalentTo(jump_table_.last())) {
       jump_table_.Add(table_entry, zone());
@@ -1102,7 +1101,6 @@
 void LCodeGen::RecordAndWritePosition(int position) {
   if (position == RelocInfo::kNoPosition) return;
   masm()->positions_recorder()->RecordPosition(position);
-  masm()->positions_recorder()->WriteRecordedPositions();
 }
 
 
@@ -2362,9 +2360,7 @@
 
   __ add(esp, Immediate(kDoubleSize));
   int offset = sizeof(kHoleNanUpper32);
-  // x87 converts sNaN(0xfff7fffffff7ffff) to QNaN(0xfffffffffff7ffff),
-  // so we check the upper with 0xffffffff for hole as a temporary fix.
-  __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff));
+  __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
   EmitBranch(instr, equal);
 }
 
@@ -2605,10 +2601,10 @@
   DeoptimizeIf(equal, instr, Deoptimizer::kProxy);
 
   __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
-  __ cmp(object_prototype, prototype);
-  EmitTrueBranch(instr, equal);
   __ cmp(object_prototype, factory()->null_value());
   EmitFalseBranch(instr, equal);
+  __ cmp(object_prototype, prototype);
+  EmitTrueBranch(instr, equal);
   __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
   __ jmp(&loop);
 }
@@ -2705,15 +2701,12 @@
 
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->global_object())
-             .is(LoadDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->result()).is(eax));
 
-  __ mov(LoadDescriptor::NameRegister(), instr->name());
   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
-                        isolate(), instr->typeof_mode(), PREMONOMORPHIC)
-                        .code();
+  Handle<Code> ic =
+      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
+          .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2822,10 +2815,7 @@
 
   __ mov(LoadDescriptor::NameRegister(), instr->name());
   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
-                        isolate(), NOT_INSIDE_TYPEOF,
-                        instr->hydrogen()->initialization_state())
-                        .code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3053,13 +3043,9 @@
   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-  }
+  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
 
-  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
-                        isolate(), instr->hydrogen()->initialization_state())
-                        .code();
+  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3690,40 +3676,17 @@
 
 void LCodeGen::DoMathLog(LMathLog* instr) {
   DCHECK(instr->value()->Equals(instr->result()));
+  X87Register result = ToX87Register(instr->result());
   X87Register input_reg = ToX87Register(instr->value());
   X87Fxch(input_reg);
 
-  Label positive, done, zero, nan_result;
-  __ fldz();
-  __ fld(1);
-  __ FCmp();
-  __ j(below, &nan_result, Label::kNear);
-  __ j(equal, &zero, Label::kNear);
-  // Positive input.
-  // {input, ln2}.
-  __ fldln2();
-  // {ln2, input}.
-  __ fxch();
-  // {result}.
-  __ fyl2x();
-  __ jmp(&done, Label::kNear);
-
-  __ bind(&nan_result);
-  X87PrepareToWrite(input_reg);
-  __ push(Immediate(0xffffffff));
-  __ push(Immediate(0x7fffffff));
-  __ fld_d(MemOperand(esp, 0));
-  __ lea(esp, Operand(esp, kDoubleSize));
-  X87CommitWrite(input_reg);
-  __ jmp(&done, Label::kNear);
-
-  __ bind(&zero);
-  ExternalReference ninf = ExternalReference::address_of_negative_infinity();
-  X87PrepareToWrite(input_reg);
-  __ fld_d(Operand::StaticVariable(ninf));
-  X87CommitWrite(input_reg);
-
-  __ bind(&done);
+  // Pass one double as argument on the stack.
+  __ PrepareCallCFunction(2, eax);
+  __ fstp_d(MemOperand(esp, 0));
+  X87PrepareToWrite(result);
+  __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 2);
+  // Return value is in st(0) on ia32.
+  X87CommitWrite(result);
 }
 
 
@@ -3734,67 +3697,46 @@
   __ Lzcnt(result, input);
 }
 
+void LCodeGen::DoMathCos(LMathCos* instr) {
+  X87Register result = ToX87Register(instr->result());
+  X87Register input_reg = ToX87Register(instr->value());
+  __ fld(x87_stack_.st(input_reg));
+
+  // Pass one double as argument on the stack.
+  __ PrepareCallCFunction(2, eax);
+  __ fstp_d(MemOperand(esp, 0));
+  X87PrepareToWrite(result);
+  __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 2);
+  // Return value is in st(0) on ia32.
+  X87CommitWrite(result);
+}
+
+void LCodeGen::DoMathSin(LMathSin* instr) {
+  X87Register result = ToX87Register(instr->result());
+  X87Register input_reg = ToX87Register(instr->value());
+  __ fld(x87_stack_.st(input_reg));
+
+  // Pass one double as argument on the stack.
+  __ PrepareCallCFunction(2, eax);
+  __ fstp_d(MemOperand(esp, 0));
+  X87PrepareToWrite(result);
+  __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 2);
+  // Return value is in st(0) on ia32.
+  X87CommitWrite(result);
+}
 
 void LCodeGen::DoMathExp(LMathExp* instr) {
-  X87Register input = ToX87Register(instr->value());
-  X87Register result_reg = ToX87Register(instr->result());
-  Register temp_result = ToRegister(instr->temp1());
-  Register temp = ToRegister(instr->temp2());
-  Label slow, done, smi, finish;
-  DCHECK(result_reg.is(input));
+  X87Register result = ToX87Register(instr->result());
+  X87Register input_reg = ToX87Register(instr->value());
+  __ fld(x87_stack_.st(input_reg));
 
-  // Store input into Heap number and call runtime function kMathExpRT.
-  if (FLAG_inline_new) {
-    __ AllocateHeapNumber(temp_result, temp, no_reg, &slow);
-    __ jmp(&done, Label::kNear);
-  }
-
-  // Slow case: Call the runtime system to do the number allocation.
-  __ bind(&slow);
-  {
-    // TODO(3095996): Put a valid pointer value in the stack slot where the
-    // result register is stored, as this register is in the pointer map, but
-    // contains an integer value.
-    __ Move(temp_result, Immediate(0));
-
-    // Preserve the value of all registers.
-    PushSafepointRegistersScope scope(this);
-
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-    RecordSafepointWithRegisters(instr->pointer_map(), 0,
-                                 Safepoint::kNoLazyDeopt);
-    __ StoreToSafepointRegisterSlot(temp_result, eax);
-  }
-  __ bind(&done);
-  X87LoadForUsage(input);
-  __ fstp_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
-
-  {
-    // Preserve the value of all registers.
-    PushSafepointRegistersScope scope(this);
-
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-    __ push(temp_result);
-    __ CallRuntimeSaveDoubles(Runtime::kMathExpRT);
-    RecordSafepointWithRegisters(instr->pointer_map(), 1,
-                                 Safepoint::kNoLazyDeopt);
-    __ StoreToSafepointRegisterSlot(temp_result, eax);
-  }
-  X87PrepareToWrite(result_reg);
-  // return value of MathExpRT is Smi or Heap Number.
-  __ JumpIfSmi(temp_result, &smi);
-  // Heap number(double)
-  __ fld_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
-  __ jmp(&finish);
-  // SMI
-  __ bind(&smi);
-  __ SmiUntag(temp_result);
-  __ push(temp_result);
-  __ fild_s(MemOperand(esp, 0));
-  __ pop(temp_result);
-  __ bind(&finish);
-  X87CommitWrite(result_reg);
+  // Pass one double as argument on the stack.
+  __ PrepareCallCFunction(2, eax);
+  __ fstp_d(MemOperand(esp, 0));
+  X87PrepareToWrite(result);
+  __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 2);
+  // Return value is in st(0) on ia32.
+  X87CommitWrite(result);
 }
 
 void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
@@ -3809,7 +3751,9 @@
 #endif
   if (FLAG_code_comments) {
     if (actual.is_reg()) {
-      Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+      Comment(";;; PrepareForTailCall, actual: %s {",
+              RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+                  actual.reg().code()));
     } else {
       Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
     }
@@ -3879,14 +3823,7 @@
   DCHECK(ToRegister(instr->result()).is(eax));
 
   __ Move(eax, Immediate(instr->arity()));
-  if (instr->arity() == 1) {
-    // We only need the allocation site for the case we have a length argument.
-    // The case may bail out to the runtime, which will determine the correct
-    // elements kind with the site.
-    __ mov(ebx, instr->hydrogen()->site());
-  } else {
-    __ mov(ebx, isolate()->factory()->undefined_value());
-  }
+  __ mov(ebx, instr->hydrogen()->site());
 
   ElementsKind kind = instr->hydrogen()->elements_kind();
   AllocationSiteOverrideMode override_mode =
@@ -3920,7 +3857,7 @@
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
     __ bind(&done);
   } else {
-    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    ArrayNArgumentsConstructorStub stub(isolate());
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
@@ -4047,14 +3984,12 @@
   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-  }
+  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
 
   __ mov(StoreDescriptor::NameRegister(), instr->name());
-  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
-                        isolate(), instr->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+  Handle<Code> ic =
+      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
+          .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -4116,9 +4051,7 @@
     __ fst_d(MemOperand(esp, 0));
     __ lea(esp, Operand(esp, kDoubleSize));
     int offset = sizeof(kHoleNanUpper32);
-    // x87 converts sNaN(0xfff7fffffff7ffff) to QNaN(0xfffffffffff7ffff),
-    // so we check the upper with 0xffffffff for hole as a temporary fix.
-    __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff));
+    __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
     __ j(not_equal, &no_special_nan_handling, Label::kNear);
     __ mov(operand, Immediate(lower));
     __ mov(operand2, Immediate(upper));
@@ -4204,9 +4137,7 @@
       __ fst_d(MemOperand(esp, 0));
       __ lea(esp, Operand(esp, kDoubleSize));
       int offset = sizeof(kHoleNanUpper32);
-      // x87 converts sNaN(0xfff7fffffff7ffff) to QNaN(0xfffffffffff7ffff),
-      // so we check the upper with 0xffffffff for hole as a temporary fix.
-      __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff));
+      __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
       __ j(not_equal, &no_special_nan_handling, Label::kNear);
       __ mov(double_store_operand, Immediate(lower));
       __ mov(double_store_operand2, Immediate(upper));
@@ -4277,13 +4208,11 @@
   DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  if (instr->hydrogen()->HasVectorAndSlot()) {
-    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-  }
+  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
 
   Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
-                        isolate(), instr->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+                        isolate(), instr->language_mode())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -4425,8 +4354,7 @@
     DCHECK(object_reg.is(eax));
     PushSafepointRegistersScope scope(this);
     __ mov(ebx, to_map);
-    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
-    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
     __ CallStub(&stub);
     RecordSafepointWithLazyDeopt(instr,
         RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -5373,21 +5301,6 @@
 }
 
 
-void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
-  Register hi_reg = ToRegister(instr->hi());
-  Register lo_reg = ToRegister(instr->lo());
-  X87Register result_reg = ToX87Register(instr->result());
-  // Follow below pattern to write a x87 fp register.
-  X87PrepareToWrite(result_reg);
-  __ sub(esp, Immediate(kDoubleSize));
-  __ mov(Operand(esp, 0), lo_reg);
-  __ mov(Operand(esp, kPointerSize), hi_reg);
-  __ fld_d(Operand(esp, 0));
-  __ add(esp, Immediate(kDoubleSize));
-  X87CommitWrite(result_reg);
-}
-
-
 void LCodeGen::DoAllocate(LAllocate* instr) {
   class DeferredAllocate final : public LDeferredCode {
    public:
diff --git a/src/crankshaft/x87/lithium-gap-resolver-x87.cc b/src/crankshaft/x87/lithium-gap-resolver-x87.cc
index aa91835..6bfc2e2 100644
--- a/src/crankshaft/x87/lithium-gap-resolver-x87.cc
+++ b/src/crankshaft/x87/lithium-gap-resolver-x87.cc
@@ -168,8 +168,7 @@
 
 Register LGapResolver::GetFreeRegisterNot(Register reg) {
   int skip_index = reg.is(no_reg) ? -1 : reg.code();
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
     int code = config->GetAllocatableGeneralCode(i);
     if (source_uses_[code] == 0 && destination_uses_[code] > 0 &&
@@ -184,8 +183,7 @@
 bool LGapResolver::HasBeenReset() {
   if (!moves_.is_empty()) return false;
   if (spilled_register_ >= 0) return false;
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
     int code = config->GetAllocatableGeneralCode(i);
     if (source_uses_[code] != 0) return false;
@@ -239,8 +237,7 @@
 
   // 3. Prefer to spill a register that is not used in any remaining move
   // because it will not need to be restored until the end.
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
     int code = config->GetAllocatableGeneralCode(i);
     if (source_uses_[code] == 0 && destination_uses_[code] == 0) {
diff --git a/src/crankshaft/x87/lithium-x87.cc b/src/crankshaft/x87/lithium-x87.cc
index 7df70ae..23941ad 100644
--- a/src/crankshaft/x87/lithium-x87.cc
+++ b/src/crankshaft/x87/lithium-x87.cc
@@ -1110,15 +1110,28 @@
 
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   switch (instr->op()) {
-    case kMathFloor: return DoMathFloor(instr);
-    case kMathRound: return DoMathRound(instr);
-    case kMathFround: return DoMathFround(instr);
-    case kMathAbs: return DoMathAbs(instr);
-    case kMathLog: return DoMathLog(instr);
-    case kMathExp: return DoMathExp(instr);
-    case kMathSqrt: return DoMathSqrt(instr);
-    case kMathPowHalf: return DoMathPowHalf(instr);
-    case kMathClz32: return DoMathClz32(instr);
+    case kMathCos:
+      return DoMathCos(instr);
+    case kMathFloor:
+      return DoMathFloor(instr);
+    case kMathRound:
+      return DoMathRound(instr);
+    case kMathFround:
+      return DoMathFround(instr);
+    case kMathAbs:
+      return DoMathAbs(instr);
+    case kMathLog:
+      return DoMathLog(instr);
+    case kMathExp:
+      return DoMathExp(instr);
+    case kMathSqrt:
+      return DoMathSqrt(instr);
+    case kMathPowHalf:
+      return DoMathPowHalf(instr);
+    case kMathClz32:
+      return DoMathClz32(instr);
+    case kMathSin:
+      return DoMathSin(instr);
     default:
       UNREACHABLE();
       return NULL;
@@ -1173,15 +1186,25 @@
   return DefineAsRegister(result);
 }
 
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return MarkAsCall(DefineSameAsFirst(new (zone()) LMathCos(input)), instr);
+}
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return MarkAsCall(DefineSameAsFirst(new (zone()) LMathSin(input)), instr);
+}
 
 LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
   DCHECK(instr->representation().IsDouble());
   DCHECK(instr->value()->representation().IsDouble());
-  LOperand* value = UseRegisterAtStart(instr->value());
-  LOperand* temp1 = FixedTemp(ecx);
-  LOperand* temp2 = FixedTemp(edx);
-  LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
-  return MarkAsCall(DefineSameAsFirst(result), instr);
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return MarkAsCall(DefineSameAsFirst(new (zone()) LMathExp(input)), instr);
 }
 
 
@@ -1969,13 +1992,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
-  LOperand* lo = UseRegister(instr->lo());
-  LOperand* hi = UseRegister(instr->hi());
-  return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
-}
-
-
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
   LOperand* context = info()->IsStub() ? UseFixed(instr->context(), esi) : NULL;
   LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
@@ -2005,15 +2021,9 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* global_object =
-      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
 
-  LLoadGlobalGeneric* result =
-      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+  LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2061,10 +2071,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
   LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
       context, object, vector);
   return MarkAsCall(DefineFixed(result, eax), instr);
@@ -2134,10 +2141,7 @@
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-  }
+  LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
   LLoadKeyedGeneric* result =
       new(zone()) LLoadKeyedGeneric(context, object, key, vector);
   return MarkAsCall(DefineFixed(result, eax), instr);
@@ -2227,12 +2231,8 @@
   DCHECK(instr->key()->representation().IsTagged());
   DCHECK(instr->value()->representation().IsTagged());
 
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
-    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
-  }
+  LOperand* slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+  LOperand* vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
 
   LStoreKeyedGeneric* result = new (zone())
       LStoreKeyedGeneric(context, object, key, value, slot, vector);
@@ -2346,12 +2346,8 @@
   LOperand* object =
       UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
   LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
-    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
-  }
+  LOperand* slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+  LOperand* vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
 
   LStoreNamedGeneric* result =
       new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
diff --git a/src/crankshaft/x87/lithium-x87.h b/src/crankshaft/x87/lithium-x87.h
index 98703ae..d05d6eb 100644
--- a/src/crankshaft/x87/lithium-x87.h
+++ b/src/crankshaft/x87/lithium-x87.h
@@ -58,7 +58,6 @@
   V(ConstantI)                               \
   V(ConstantS)                               \
   V(ConstantT)                               \
-  V(ConstructDouble)                         \
   V(Context)                                 \
   V(DebugBreak)                              \
   V(DeclareGlobals)                          \
@@ -103,6 +102,7 @@
   V(LoadRoot)                                \
   V(MathAbs)                                 \
   V(MathClz32)                               \
+  V(MathCos)                                 \
   V(MathExp)                                 \
   V(MathFloor)                               \
   V(MathFround)                              \
@@ -112,6 +112,7 @@
   V(MathRound)                               \
   V(MathSqrt)                                \
   V(MaybeGrowElements)                       \
+  V(MathSin)                                 \
   V(ModByConstI)                             \
   V(ModByPowerOf2I)                          \
   V(ModI)                                    \
@@ -904,21 +905,29 @@
   DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
 };
 
-
-class LMathExp final : public LTemplateInstruction<1, 1, 2> {
+class LMathCos final : public LTemplateInstruction<1, 1, 0> {
  public:
-  LMathExp(LOperand* value,
-           LOperand* temp1,
-           LOperand* temp2) {
-    inputs_[0] = value;
-    temps_[0] = temp1;
-    temps_[1] = temp2;
-    ExternalReference::InitializeMathExpData();
-  }
+  explicit LMathCos(LOperand* value) { inputs_[0] = value; }
 
   LOperand* value() { return inputs_[0]; }
-  LOperand* temp1() { return temps_[0]; }
-  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+class LMathSin final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathSin(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+class LMathExp final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathExp(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
 };
@@ -1579,18 +1588,14 @@
   DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
 };
 
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
  public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
-                     LOperand* vector) {
+  LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
     inputs_[0] = context;
-    inputs_[1] = global_object;
     temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
-  LOperand* global_object() { return inputs_[1]; }
   LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
@@ -2351,20 +2356,6 @@
 };
 
 
-class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
- public:
-  LConstructDouble(LOperand* hi, LOperand* lo) {
-    inputs_[0] = hi;
-    inputs_[1] = lo;
-  }
-
-  LOperand* hi() { return inputs_[0]; }
-  LOperand* lo() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
-};
-
-
 class LAllocate final : public LTemplateInstruction<1, 2, 1> {
  public:
   LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
@@ -2548,6 +2539,8 @@
   LInstruction* DoMathFround(HUnaryMathOperation* instr);
   LInstruction* DoMathAbs(HUnaryMathOperation* instr);
   LInstruction* DoMathLog(HUnaryMathOperation* instr);
+  LInstruction* DoMathCos(HUnaryMathOperation* instr);
+  LInstruction* DoMathSin(HUnaryMathOperation* instr);
   LInstruction* DoMathExp(HUnaryMathOperation* instr);
   LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
   LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
diff --git a/src/d8-posix.cc b/src/d8-posix.cc
index 36d83b5..fb14f95 100644
--- a/src/d8-posix.cc
+++ b/src/d8-posix.cc
@@ -531,21 +531,18 @@
   OpenFDCloser error_read_closer(exec_error_fds[kReadFD]);
   OpenFDCloser stdout_read_closer(stdout_fds[kReadFD]);
 
-  if (!ChildLaunchedOK(args.GetIsolate(), exec_error_fds)) return;
+  Isolate* isolate = args.GetIsolate();
+  if (!ChildLaunchedOK(isolate, exec_error_fds)) return;
 
-  Local<Value> accumulator = GetStdout(args.GetIsolate(), stdout_fds[kReadFD],
-                                       start_time, read_timeout, total_timeout);
+  Local<Value> accumulator = GetStdout(isolate, stdout_fds[kReadFD], start_time,
+                                       read_timeout, total_timeout);
   if (accumulator->IsUndefined()) {
     kill(pid, SIGINT);  // On timeout, kill the subprocess.
     args.GetReturnValue().Set(accumulator);
     return;
   }
 
-  if (!WaitForChild(args.GetIsolate(),
-                    pid,
-                    child_waiter,
-                    start_time,
-                    read_timeout,
+  if (!WaitForChild(isolate, pid, child_waiter, start_time, read_timeout,
                     total_timeout)) {
     return;
   }
diff --git a/src/d8.cc b/src/d8.cc
index 9466ab7..7b62707 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -338,7 +338,9 @@
     ScriptCompiler::CompileOptions compile_options, SourceType source_type) {
   Local<Context> context(isolate->GetCurrentContext());
   ScriptOrigin origin(name);
-  if (compile_options == ScriptCompiler::kNoCompileOptions) {
+  // TODO(adamk): Make use of compile options for Modules.
+  if (compile_options == ScriptCompiler::kNoCompileOptions ||
+      source_type == MODULE) {
     ScriptCompiler::Source script_source(source, origin);
     return source_type == SCRIPT
                ? ScriptCompiler::Compile(context, &script_source,
@@ -358,11 +360,9 @@
     DCHECK(false);  // A new compile option?
   }
   if (data == NULL) compile_options = ScriptCompiler::kNoCompileOptions;
+  DCHECK_EQ(SCRIPT, source_type);
   MaybeLocal<Script> result =
-      source_type == SCRIPT
-          ? ScriptCompiler::Compile(context, &cached_source, compile_options)
-          : ScriptCompiler::CompileModule(context, &cached_source,
-                                          compile_options);
+      ScriptCompiler::Compile(context, &cached_source, compile_options);
   CHECK(data == NULL || !data->rejected);
   return result;
 }
@@ -523,9 +523,8 @@
       Local<Context>::New(args.GetIsolate(), data->realms_[index])->Global());
 }
 
-
-// Realm.create() creates a new realm and returns its index.
-void Shell::RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args) {
+MaybeLocal<Context> Shell::CreateRealm(
+    const v8::FunctionCallbackInfo<v8::Value>& args) {
   Isolate* isolate = args.GetIsolate();
   TryCatch try_catch(isolate);
   PerIsolateData* data = PerIsolateData::Get(isolate);
@@ -542,12 +541,29 @@
   if (context.IsEmpty()) {
     DCHECK(try_catch.HasCaught());
     try_catch.ReThrow();
-    return;
+    return MaybeLocal<Context>();
   }
   data->realms_[index].Reset(isolate, context);
   args.GetReturnValue().Set(index);
+  return context;
 }
 
+// Realm.create() creates a new realm with a distinct security token
+// and returns its index.
+void Shell::RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  CreateRealm(args);
+}
+
+// Realm.createAllowCrossRealmAccess() creates a new realm with the same
+// security token as the current realm.
+void Shell::RealmCreateAllowCrossRealmAccess(
+    const v8::FunctionCallbackInfo<v8::Value>& args) {
+  Local<Context> context;
+  if (CreateRealm(args).ToLocal(&context)) {
+    context->SetSecurityToken(
+        args.GetIsolate()->GetEnteredContext()->GetSecurityToken());
+  }
+}
 
 // Realm.dispose(i) disposes the reference to the realm i.
 void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -909,25 +925,28 @@
     // Print (filename):(line number): (message).
     v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName());
     const char* filename_string = ToCString(filename);
-    int linenum =
-        message->GetLineNumber(isolate->GetCurrentContext()).FromJust();
+    Maybe<int> maybeline = message->GetLineNumber(isolate->GetCurrentContext());
+    int linenum = maybeline.IsJust() ? maybeline.FromJust() : -1;
     printf("%s:%i: %s\n", filename_string, linenum, exception_string);
-    // Print line of source code.
-    v8::String::Utf8Value sourceline(
-        message->GetSourceLine(isolate->GetCurrentContext()).ToLocalChecked());
-    const char* sourceline_string = ToCString(sourceline);
-    printf("%s\n", sourceline_string);
-    // Print wavy underline (GetUnderline is deprecated).
-    int start =
-        message->GetStartColumn(isolate->GetCurrentContext()).FromJust();
-    for (int i = 0; i < start; i++) {
-      printf(" ");
+    Local<String> sourceline;
+    if (message->GetSourceLine(isolate->GetCurrentContext())
+            .ToLocal(&sourceline)) {
+      // Print line of source code.
+      v8::String::Utf8Value sourcelinevalue(sourceline);
+      const char* sourceline_string = ToCString(sourcelinevalue);
+      printf("%s\n", sourceline_string);
+      // Print wavy underline (GetUnderline is deprecated).
+      int start =
+          message->GetStartColumn(isolate->GetCurrentContext()).FromJust();
+      for (int i = 0; i < start; i++) {
+        printf(" ");
+      }
+      int end = message->GetEndColumn(isolate->GetCurrentContext()).FromJust();
+      for (int i = start; i < end; i++) {
+        printf("^");
+      }
+      printf("\n");
     }
-    int end = message->GetEndColumn(isolate->GetCurrentContext()).FromJust();
-    for (int i = start; i < end; i++) {
-      printf("^");
-    }
-    printf("\n");
     Local<Value> stack_trace_string;
     if (try_catch->StackTrace(isolate->GetCurrentContext())
             .ToLocal(&stack_trace_string) &&
@@ -1116,6 +1135,10 @@
       String::NewFromUtf8(isolate, "version", NewStringType::kNormal)
           .ToLocalChecked(),
       FunctionTemplate::New(isolate, Version));
+  global_template->Set(
+      Symbol::GetToStringTag(isolate),
+      String::NewFromUtf8(isolate, "global", NewStringType::kNormal)
+          .ToLocalChecked());
 
   // Bind the Realm object.
   Local<ObjectTemplate> realm_template = ObjectTemplate::New(isolate);
@@ -1136,6 +1159,11 @@
           .ToLocalChecked(),
       FunctionTemplate::New(isolate, RealmCreate));
   realm_template->Set(
+      String::NewFromUtf8(isolate, "createAllowCrossRealmAccess",
+                          NewStringType::kNormal)
+          .ToLocalChecked(),
+      FunctionTemplate::New(isolate, RealmCreateAllowCrossRealmAccess));
+  realm_template->Set(
       String::NewFromUtf8(isolate, "dispose", NewStringType::kNormal)
           .ToLocalChecked(),
       FunctionTemplate::New(isolate, RealmDispose));
@@ -2401,7 +2429,7 @@
 #endif  // defined(_MSC_VER)
 #endif  // defined(_WIN32) || defined(_WIN64)
   if (!SetOptions(argc, argv)) return 1;
-  v8::V8::InitializeICU(options.icu_data_file);
+  v8::V8::InitializeICUDefaultLocation(argv[0], options.icu_data_file);
 #ifndef V8_SHARED
   g_platform = i::FLAG_verify_predictable
                    ? new PredictablePlatform()
diff --git a/src/d8.h b/src/d8.h
index e51e8ee..36ec43e 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -7,8 +7,8 @@
 
 #ifndef V8_SHARED
 #include "src/allocation.h"
+#include "src/base/hashmap.h"
 #include "src/base/platform/time.h"
-#include "src/hashmap.h"
 #include "src/list.h"
 #else
 #include "include/v8.h"
@@ -61,13 +61,13 @@
  public:
   CounterMap(): hash_map_(Match) { }
   Counter* Lookup(const char* name) {
-    i::HashMap::Entry* answer =
+    base::HashMap::Entry* answer =
         hash_map_.Lookup(const_cast<char*>(name), Hash(name));
     if (!answer) return NULL;
     return reinterpret_cast<Counter*>(answer->value);
   }
   void Set(const char* name, Counter* value) {
-    i::HashMap::Entry* answer =
+    base::HashMap::Entry* answer =
         hash_map_.LookupOrInsert(const_cast<char*>(name), Hash(name));
     DCHECK(answer != NULL);
     answer->value = value;
@@ -81,14 +81,14 @@
     const char* CurrentKey() { return static_cast<const char*>(entry_->key); }
     Counter* CurrentValue() { return static_cast<Counter*>(entry_->value); }
    private:
-    i::HashMap* map_;
-    i::HashMap::Entry* entry_;
+    base::HashMap* map_;
+    base::HashMap::Entry* entry_;
   };
 
  private:
   static int Hash(const char* name);
   static bool Match(void* key1, void* key2);
-  i::HashMap hash_map_;
+  base::HashMap hash_map_;
 };
 #endif  // !V8_SHARED
 
@@ -350,7 +350,7 @@
 
 #ifndef V8_SHARED
   // TODO(binji): stupid implementation for now. Is there an easy way to hash an
-  // object for use in i::HashMap? By pointer?
+  // object for use in base::HashMap? By pointer?
   typedef i::List<Local<Object>> ObjectList;
   static bool SerializeValue(Isolate* isolate, Local<Value> value,
                              const ObjectList& to_transfer,
@@ -375,6 +375,8 @@
   static void RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void RealmGlobal(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args);
+  static void RealmCreateAllowCrossRealmAccess(
+      const v8::FunctionCallbackInfo<v8::Value>& args);
   static void RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void RealmSwitch(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -469,6 +471,8 @@
   static void RunShell(Isolate* isolate);
   static bool SetOptions(int argc, char* argv[]);
   static Local<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
+  static MaybeLocal<Context> CreateRealm(
+      const v8::FunctionCallbackInfo<v8::Value>& args);
 };
 
 
diff --git a/src/dateparser-inl.h b/src/dateparser-inl.h
index 7e5c4e3..47a7c6e 100644
--- a/src/dateparser-inl.h
+++ b/src/dateparser-inl.h
@@ -13,9 +13,8 @@
 namespace internal {
 
 template <typename Char>
-bool DateParser::Parse(Vector<Char> str,
-                       FixedArray* out,
-                       UnicodeCache* unicode_cache) {
+bool DateParser::Parse(Isolate* isolate, Vector<Char> str, FixedArray* out) {
+  UnicodeCache* unicode_cache = isolate->unicode_cache();
   DCHECK(out->length() >= OUTPUT_SIZE);
   InputReader<Char> in(unicode_cache, str);
   DateStringTokenizer<Char> scanner(&in);
@@ -76,10 +75,12 @@
   if (next_unhandled_token.IsInvalid()) return false;
   bool has_read_number = !day.IsEmpty();
   // If there's anything left, continue with the legacy parser.
+  bool legacy_parser = false;
   for (DateToken token = next_unhandled_token;
        !token.IsEndOfInput();
        token = scanner.Next()) {
     if (token.IsNumber()) {
+      legacy_parser = true;
       has_read_number = true;
       int n = token.number();
       if (scanner.SkipSymbol(':')) {
@@ -115,6 +116,7 @@
         scanner.SkipSymbol('-');
       }
     } else if (token.IsKeyword()) {
+      legacy_parser = true;
       // Parse a "word" (sequence of chars. >= 'A').
       KeywordType type = token.keyword_type();
       int value = token.keyword_value();
@@ -133,6 +135,7 @@
         if (scanner.Peek().IsNumber()) return false;
       }
     } else if (token.IsAsciiSign() && (tz.IsUTC() || !time.IsEmpty())) {
+      legacy_parser = true;
       // Parse UTC offset (only after UTC or time).
       tz.SetSign(token.ascii_sign());
       // The following number may be empty.
@@ -170,7 +173,13 @@
     }
   }
 
-  return day.Write(out) && time.Write(out) && tz.Write(out);
+  bool success = day.Write(out) && time.Write(out) && tz.Write(out);
+
+  if (legacy_parser && success) {
+    isolate->CountUsage(v8::Isolate::kLegacyDateParser);
+  }
+
+  return success;
 }
 
 
diff --git a/src/dateparser.h b/src/dateparser.h
index 5331739..d7676cb 100644
--- a/src/dateparser.h
+++ b/src/dateparser.h
@@ -26,7 +26,7 @@
   // [7]: UTC offset in seconds, or null value if no timezone specified
   // If parsing fails, return false (content of output array is not defined).
   template <typename Char>
-  static bool Parse(Vector<Char> str, FixedArray* output, UnicodeCache* cache);
+  static bool Parse(Isolate* isolate, Vector<Char> str, FixedArray* output);
 
   enum {
     YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, MILLISECOND, UTC_OFFSET, OUTPUT_SIZE
diff --git a/src/debug/arm/debug-arm.cc b/src/debug/arm/debug-arm.cc
index fa3540e..29e4827 100644
--- a/src/debug/arm/debug-arm.cc
+++ b/src/debug/arm/debug-arm.cc
@@ -41,7 +41,7 @@
 
 void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
                                        Handle<Code> code) {
-  DCHECK_EQ(Code::BUILTIN, code->kind());
+  DCHECK(code->is_debug_stub());
   CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
   // Patch the code changing the debug break slot code from
   //   mov r2, r2
diff --git a/src/debug/arm64/debug-arm64.cc b/src/debug/arm64/debug-arm64.cc
index cd01721..bf7964a 100644
--- a/src/debug/arm64/debug-arm64.cc
+++ b/src/debug/arm64/debug-arm64.cc
@@ -43,7 +43,7 @@
 
 void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
                                        Handle<Code> code) {
-  DCHECK_EQ(Code::BUILTIN, code->kind());
+  DCHECK(code->is_debug_stub());
   PatchingAssembler patcher(isolate, reinterpret_cast<Instruction*>(pc),
                             Assembler::kDebugBreakSlotInstructions);
   // Patch the code emitted by DebugCodegen::GenerateSlots, changing the debug
diff --git a/src/debug/debug-evaluate.cc b/src/debug/debug-evaluate.cc
index d5ebaa5..62b7a2b 100644
--- a/src/debug/debug-evaluate.cc
+++ b/src/debug/debug-evaluate.cc
@@ -250,7 +250,7 @@
     // referenced by the current function, so it can be correctly resolved.
     return;
   } else if (local_function->shared()->scope_info()->HasReceiver() &&
-             !frame_->receiver()->IsTheHole()) {
+             !frame_->receiver()->IsTheHole(isolate_)) {
     recv = handle(frame_->receiver(), isolate_);
   }
   JSObject::SetOwnPropertyIgnoreAttributes(target, name, recv, NONE).Check();
diff --git a/src/debug/debug-frames.cc b/src/debug/debug-frames.cc
index 453a77d..b1a8af2 100644
--- a/src/debug/debug-frames.cc
+++ b/src/debug/debug-frames.cc
@@ -123,7 +123,7 @@
         i < GetParametersCount()
             ? GetParameter(i)
             : Handle<Object>::cast(isolate_->factory()->undefined_value());
-    DCHECK(!value->IsTheHole());
+    DCHECK(!value->IsTheHole(isolate_));
 
     JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
   }
@@ -135,8 +135,12 @@
     Handle<Object> value = GetExpression(scope_info->StackLocalIndex(i));
     // TODO(yangguo): We convert optimized out values to {undefined} when they
     // are passed to the debugger. Eventually we should handle them somehow.
-    if (value->IsTheHole()) value = isolate_->factory()->undefined_value();
-    if (value->IsOptimizedOut()) value = isolate_->factory()->undefined_value();
+    if (value->IsTheHole(isolate_)) {
+      value = isolate_->factory()->undefined_value();
+    }
+    if (value->IsOptimizedOut(isolate_)) {
+      value = isolate_->factory()->undefined_value();
+    }
     JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
   }
 }
@@ -166,7 +170,7 @@
     if (ScopeInfo::VariableIsSynthetic(*name)) continue;
     if (ParameterIsShadowedByContextLocal(scope_info, name)) continue;
 
-    DCHECK(!frame_->GetParameter(i)->IsTheHole());
+    DCHECK(!frame_->GetParameter(i)->IsTheHole(isolate_));
     Handle<Object> value =
         Object::GetPropertyOrElement(target, name).ToHandleChecked();
     frame_->SetParameterValue(i, *value);
@@ -177,7 +181,7 @@
     Handle<String> name(scope_info->StackLocalName(i));
     if (ScopeInfo::VariableIsSynthetic(*name)) continue;
     int index = scope_info->StackLocalIndex(i);
-    if (frame_->GetExpression(index)->IsTheHole()) continue;
+    if (frame_->GetExpression(index)->IsTheHole(isolate_)) continue;
     Handle<Object> value =
         Object::GetPropertyOrElement(target, name).ToHandleChecked();
     frame_->SetExpression(index, *value);
diff --git a/src/debug/debug-scopes.cc b/src/debug/debug-scopes.cc
index 1602711..400b1e1 100644
--- a/src/debug/debug-scopes.cc
+++ b/src/debug/debug-scopes.cc
@@ -34,7 +34,7 @@
   Handle<JSFunction> function = GetFunction();
   Handle<SharedFunctionInfo> shared_info(function->shared());
   Handle<ScopeInfo> scope_info(shared_info->scope_info());
-  if (shared_info->script() == isolate->heap()->undefined_value()) {
+  if (shared_info->script()->IsUndefined(isolate)) {
     while (context_->closure() == *function) {
       context_ = Handle<Context>(context_->previous(), isolate_);
     }
@@ -494,7 +494,7 @@
   if (function_context->closure() == *function &&
       !function_context->IsNativeContext()) {
     CopyContextExtensionToScopeObject(function_context, local_scope,
-                                      INCLUDE_PROTOS);
+                                      KeyCollectionMode::kIncludePrototypes);
   }
 
   return local_scope;
@@ -520,7 +520,8 @@
 
   // Finally copy any properties from the function context extension. This will
   // be variables introduced by eval.
-  CopyContextExtensionToScopeObject(context, closure_scope, OWN_ONLY);
+  CopyContextExtensionToScopeObject(context, closure_scope,
+                                    KeyCollectionMode::kOwnOnly);
 
   return closure_scope;
 }
@@ -571,7 +572,8 @@
   if (!context.is_null()) {
     // Fill all context locals.
     CopyContextLocalsToScopeObject(CurrentScopeInfo(), context, inner_scope);
-    CopyContextExtensionToScopeObject(context, inner_scope, OWN_ONLY);
+    CopyContextExtensionToScopeObject(context, inner_scope,
+                                      KeyCollectionMode::kOwnOnly);
   }
   return inner_scope;
 }
@@ -754,7 +756,7 @@
     int context_index = Context::MIN_CONTEXT_SLOTS + i;
     Handle<Object> value = Handle<Object>(context->get(context_index), isolate);
     // Reflect variables under TDZ as undefined in scope object.
-    if (value->IsTheHole()) continue;
+    if (value->IsTheHole(isolate)) continue;
     // This should always succeed.
     // TODO(verwaest): Use AddDataProperty instead.
     JSObject::SetOwnPropertyIgnoreAttributes(scope_object, name, value, NONE)
@@ -764,11 +766,11 @@
 
 void ScopeIterator::CopyContextExtensionToScopeObject(
     Handle<Context> context, Handle<JSObject> scope_object,
-    KeyCollectionType type) {
+    KeyCollectionMode mode) {
   if (context->extension_object() == nullptr) return;
   Handle<JSObject> extension(context->extension_object());
   Handle<FixedArray> keys =
-      JSReceiver::GetKeys(extension, type, ENUMERABLE_STRINGS)
+      KeyAccumulator::GetKeys(extension, mode, ENUMERABLE_STRINGS)
           .ToHandleChecked();
 
   for (int i = 0; i < keys->length(); i++) {
@@ -784,6 +786,12 @@
 
 void ScopeIterator::GetNestedScopeChain(Isolate* isolate, Scope* scope,
                                         int position) {
+  if (scope->is_function_scope()) {
+    // Do not collect scopes of nested inner functions inside the current one.
+    Handle<JSFunction> function =
+        Handle<JSFunction>::cast(frame_inspector_->GetFunction());
+    if (scope->end_position() < function->shared()->end_position()) return;
+  }
   if (scope->is_hidden()) {
     // We need to add this chain element in case the scope has a context
     // associated. We need to keep the scope chain and context chain in sync.
diff --git a/src/debug/debug-scopes.h b/src/debug/debug-scopes.h
index 9560227..1338e7b 100644
--- a/src/debug/debug-scopes.h
+++ b/src/debug/debug-scopes.h
@@ -153,7 +153,7 @@
                                       Handle<JSObject> scope_object);
   void CopyContextExtensionToScopeObject(Handle<Context> context,
                                          Handle<JSObject> scope_object,
-                                         KeyCollectionType type);
+                                         KeyCollectionMode mode);
 
   // Get the chain of nested scopes within this scope for the source statement
   // position. The scopes will be added to the list from the outermost scope to
diff --git a/src/debug/debug.cc b/src/debug/debug.cc
index 3b5fb5f..c69b04b 100644
--- a/src/debug/debug.cc
+++ b/src/debug/debug.cc
@@ -477,8 +477,8 @@
   thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
   thread_local_.last_fp_ = 0;
   thread_local_.target_fp_ = 0;
-  thread_local_.step_in_enabled_ = false;
   thread_local_.return_value_ = Handle<Object>();
+  clear_suspended_generator();
   // TODO(isolates): frames_are_dropped_?
   base::NoBarrier_Store(&thread_local_.current_debug_scope_,
                         static_cast<base::AtomicWord>(0));
@@ -486,25 +486,24 @@
 
 
 char* Debug::ArchiveDebug(char* storage) {
-  char* to = storage;
-  MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
+  // Simply reset state. Don't archive anything.
   ThreadInit();
   return storage + ArchiveSpacePerThread();
 }
 
 
 char* Debug::RestoreDebug(char* storage) {
-  char* from = storage;
-  MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
+  // Simply reset state. Don't restore anything.
+  ThreadInit();
   return storage + ArchiveSpacePerThread();
 }
 
+int Debug::ArchiveSpacePerThread() { return 0; }
 
-int Debug::ArchiveSpacePerThread() {
-  return sizeof(ThreadLocal);
+void Debug::Iterate(ObjectVisitor* v) {
+  v->VisitPointer(&thread_local_.suspended_generator_);
 }
 
-
 DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
   // Globalize the request debug info object and make it weak.
   GlobalHandles* global_handles = debug_info->GetIsolate()->global_handles();
@@ -537,9 +536,13 @@
   // Create the debugger context.
   HandleScope scope(isolate_);
   ExtensionConfiguration no_extensions;
+  // TODO(yangguo): we rely on the fact that first context snapshot is usable
+  //                as debug context. This dependency is gone once we remove
+  //                debug context completely.
+  static const int kFirstContextSnapshotIndex = 0;
   Handle<Context> context = isolate_->bootstrapper()->CreateEnvironment(
       MaybeHandle<JSGlobalProxy>(), v8::Local<ObjectTemplate>(), &no_extensions,
-      DEBUG_CONTEXT);
+      kFirstContextSnapshotIndex, DEBUG_CONTEXT);
 
   // Fail if no context could be created.
   if (context.is_null()) return false;
@@ -588,14 +591,14 @@
     // Return if we failed to retrieve the debug info.
     return;
   }
-  Handle<DebugInfo> debug_info(shared->GetDebugInfo());
+  Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate_);
 
   // Find the break location where execution has stopped.
   BreakLocation location = BreakLocation::FromFrame(debug_info, frame);
 
   // Find actual break points, if any, and trigger debug break event.
   Handle<Object> break_points_hit = CheckBreakPoints(&location);
-  if (!break_points_hit->IsUndefined()) {
+  if (!break_points_hit->IsUndefined(isolate_)) {
     // Clear all current stepping setup.
     ClearStepping();
     // Notify the debug event listeners.
@@ -666,7 +669,7 @@
   // they are in a FixedArray.
   Handle<FixedArray> break_points_hit;
   int break_points_hit_count = 0;
-  DCHECK(!break_point_objects->IsUndefined());
+  DCHECK(!break_point_objects->IsUndefined(isolate_));
   if (break_point_objects->IsFixedArray()) {
     Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
     break_points_hit = factory->NewFixedArray(array->length());
@@ -714,7 +717,7 @@
     Handle<Object> check_result =
         CheckBreakPoints(&break_locations[i], &has_break_points);
     has_break_points_at_all |= has_break_points;
-    if (has_break_points && !check_result->IsUndefined()) return false;
+    if (has_break_points && !check_result->IsUndefined(isolate_)) return false;
   }
   return has_break_points_at_all;
 }
@@ -753,7 +756,7 @@
   }
 
   // Return whether the break point is triggered.
-  return result->IsTrue();
+  return result->IsTrue(isolate_);
 }
 
 
@@ -795,7 +798,7 @@
   // Obtain shared function info for the function.
   Handle<Object> result =
       FindSharedFunctionInfoInScript(script, *source_position);
-  if (result->IsUndefined()) return false;
+  if (result->IsUndefined(isolate_)) return false;
 
   // Make sure the function has set up the debug info.
   Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>::cast(result);
@@ -842,7 +845,7 @@
   while (node != NULL) {
     Handle<Object> result =
         DebugInfo::FindBreakPointInfo(node->debug_info(), break_point_object);
-    if (!result->IsUndefined()) {
+    if (!result->IsUndefined(isolate_)) {
       // Get information in the break point.
       Handle<BreakPointInfo> break_point_info =
           Handle<BreakPointInfo>::cast(result);
@@ -932,14 +935,22 @@
 
 
 void Debug::PrepareStepIn(Handle<JSFunction> function) {
+  CHECK(last_step_action() >= StepIn);
   if (!is_active()) return;
-  if (last_step_action() < StepIn) return;
   if (in_debug_scope()) return;
-  if (thread_local_.step_in_enabled_) {
-    FloodWithOneShot(function);
-  }
+  FloodWithOneShot(function);
 }
 
+void Debug::PrepareStepInSuspendedGenerator() {
+  CHECK(has_suspended_generator());
+  if (!is_active()) return;
+  if (in_debug_scope()) return;
+  thread_local_.last_step_action_ = StepIn;
+  Handle<JSFunction> function(
+      JSGeneratorObject::cast(thread_local_.suspended_generator_)->function());
+  FloodWithOneShot(function);
+  clear_suspended_generator();
+}
 
 void Debug::PrepareStepOnThrow() {
   if (!is_active()) return;
@@ -994,10 +1005,7 @@
 
   feature_tracker()->Track(DebugFeatureTracker::kStepping);
 
-  // Remember this step action and count.
   thread_local_.last_step_action_ = step_action;
-  STATIC_ASSERT(StepFrame > StepIn);
-  thread_local_.step_in_enabled_ = (step_action >= StepIn);
 
   // If the function on the top frame is unresolved perform step out. This will
   // be the case when calling unknown function and having the debugger stopped
@@ -1041,6 +1049,8 @@
       debug_info->abstract_code()->SourceStatementPosition(
           summary.code_offset());
   thread_local_.last_fp_ = frame->UnpaddedFP();
+  // No longer perform the current async step.
+  clear_suspended_generator();
 
   switch (step_action) {
     case StepNone:
@@ -1057,11 +1067,7 @@
         Deoptimizer::DeoptimizeFunction(frames_it.frame()->function());
         frames_it.Advance();
       }
-      if (frames_it.done()) {
-        // Stepping out to the embedder. Disable step-in to avoid stepping into
-        // the next (unrelated) call that the embedder makes.
-        thread_local_.step_in_enabled_ = false;
-      } else {
+      if (!frames_it.done()) {
         // Fill the caller function to return to with one-shot break points.
         Handle<JSFunction> caller_function(frames_it.frame()->function());
         FloodWithOneShot(caller_function);
@@ -1092,19 +1098,18 @@
     Handle<SharedFunctionInfo> shared,
     BreakPositionAlignment position_alignment) {
   Isolate* isolate = shared->GetIsolate();
-  Heap* heap = isolate->heap();
   if (!shared->HasDebugInfo()) {
-    return Handle<Object>(heap->undefined_value(), isolate);
+    return isolate->factory()->undefined_value();
   }
   Handle<DebugInfo> debug_info(shared->GetDebugInfo());
   if (debug_info->GetBreakPointCount() == 0) {
-    return Handle<Object>(heap->undefined_value(), isolate);
+    return isolate->factory()->undefined_value();
   }
   Handle<FixedArray> locations =
       isolate->factory()->NewFixedArray(debug_info->GetBreakPointCount());
   int count = 0;
   for (int i = 0; i < debug_info->break_points()->length(); ++i) {
-    if (!debug_info->break_points()->get(i)->IsUndefined()) {
+    if (!debug_info->break_points()->get(i)->IsUndefined(isolate)) {
       BreakPointInfo* break_point_info =
           BreakPointInfo::cast(debug_info->break_points()->get(i));
       int break_points = break_point_info->GetBreakPointCount();
@@ -1130,7 +1135,6 @@
   ClearOneShot();
 
   thread_local_.last_step_action_ = StepNone;
-  thread_local_.step_in_enabled_ = false;
   thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
   thread_local_.last_fp_ = 0;
   thread_local_.target_fp_ = 0;
@@ -1155,12 +1159,6 @@
 }
 
 
-void Debug::EnableStepIn() {
-  STATIC_ASSERT(StepFrame > StepIn);
-  thread_local_.step_in_enabled_ = (last_step_action() >= StepIn);
-}
-
-
 bool MatchingCodeTargets(Code* target1, Code* target2) {
   if (target1 == target2) return true;
   if (target1->kind() != target2->kind()) return false;
@@ -1313,9 +1311,7 @@
   {
     SharedFunctionInfo::Iterator iterator(isolate_);
     while (SharedFunctionInfo* shared = iterator.Next()) {
-      if (!shared->OptimizedCodeMapIsCleared()) {
-        shared->ClearOptimizedCodeMap();
-      }
+      shared->ClearCodeFromOptimizedCodeMap();
     }
   }
 
@@ -1323,6 +1319,7 @@
   isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
                                       "prepare for break points");
 
+  DCHECK(shared->is_compiled());
   bool is_interpreted = shared->HasBytecodeArray();
 
   {
@@ -1331,7 +1328,7 @@
     // smarter here and avoid the heap walk.
     HeapIterator iterator(isolate_->heap());
     HeapObject* obj;
-    bool include_generators = !is_interpreted && shared->is_generator();
+    bool find_resumables = !is_interpreted && shared->is_resumable();
 
     while ((obj = iterator.next())) {
       if (obj->IsJSFunction()) {
@@ -1342,7 +1339,9 @@
         }
         if (is_interpreted) continue;
         if (function->shared() == *shared) functions.Add(handle(function));
-      } else if (include_generators && obj->IsJSGeneratorObject()) {
+      } else if (find_resumables && obj->IsJSGeneratorObject()) {
+        // This case handles async functions as well, as they use generator
+        // objects for in-progress async function execution.
         JSGeneratorObject* generator_obj = JSGeneratorObject::cast(obj);
         if (!generator_obj->is_suspended()) continue;
         JSFunction* function = generator_obj->function();
@@ -1368,6 +1367,7 @@
 
   for (Handle<JSFunction> const function : functions) {
     function->ReplaceCode(shared->code());
+    JSFunction::EnsureLiterals(function);
   }
 
   for (Handle<JSGeneratorObject> const generator_obj : suspended_generators) {
@@ -1384,6 +1384,13 @@
   return true;
 }
 
+void Debug::RecordAsyncFunction(Handle<JSGeneratorObject> generator_object) {
+  if (last_step_action() <= StepOut) return;
+  if (!generator_object->function()->shared()->is_async()) return;
+  DCHECK(!has_suspended_generator());
+  thread_local_.suspended_generator_ = *generator_object;
+  ClearStepping();
+}
 
 class SharedFunctionInfoFinder {
  public:
@@ -1725,7 +1732,7 @@
   HandleScope scope(isolate_);
   // Check whether the promise has been marked as having triggered a message.
   Handle<Symbol> key = isolate_->factory()->promise_debug_marker_symbol();
-  if (JSReceiver::GetDataProperty(promise, key)->IsUndefined()) {
+  if (JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate_)) {
     OnException(value, promise);
   }
 }
@@ -1752,7 +1759,7 @@
     ASSIGN_RETURN_ON_EXCEPTION_VALUE(
         isolate_, has_reject_handler,
         PromiseHasUserDefinedRejectHandler(jspromise), /* void */);
-    uncaught = has_reject_handler->IsFalse();
+    uncaught = has_reject_handler->IsFalse(isolate_);
   }
   // Bail out if exception breaks are not active
   if (uncaught) {
@@ -2051,7 +2058,7 @@
                            request_args, &maybe_exception);
 
     if (maybe_result.ToHandle(&answer_value)) {
-      if (answer_value->IsUndefined()) {
+      if (answer_value->IsUndefined(isolate_)) {
         answer = isolate_->factory()->empty_string();
       } else {
         answer = Handle<String>::cast(answer_value);
@@ -2068,7 +2075,7 @@
           isolate_, is_running, cmd_processor, 1, is_running_args);
       Handle<Object> result;
       if (!maybe_result.ToHandle(&result)) break;
-      running = result->IsTrue();
+      running = result->IsTrue(isolate_);
     } else {
       Handle<Object> exception;
       if (!maybe_exception.ToHandle(&exception)) break;
@@ -2102,7 +2109,7 @@
   event_listener_data_ = Handle<Object>();
 
   // Set new entry.
-  if (!callback->IsUndefined() && !callback->IsNull()) {
+  if (!callback->IsUndefined(isolate_) && !callback->IsNull(isolate_)) {
     event_listener_ = global_handles->Create(*callback);
     if (data.is_null()) data = isolate_->factory()->undefined_value();
     event_listener_data_ = global_handles->Create(*data);
@@ -2492,6 +2499,9 @@
   return client_data_;
 }
 
+v8::Isolate* EventDetailsImpl::GetIsolate() const {
+  return reinterpret_cast<v8::Isolate*>(exec_state_->GetIsolate());
+}
 
 CommandMessage::CommandMessage() : text_(Vector<uint16_t>::empty()),
                                    client_data_(NULL) {
diff --git a/src/debug/debug.h b/src/debug/debug.h
index 2cdc151..eb2708c 100644
--- a/src/debug/debug.h
+++ b/src/debug/debug.h
@@ -9,13 +9,13 @@
 #include "src/arguments.h"
 #include "src/assembler.h"
 #include "src/base/atomicops.h"
+#include "src/base/hashmap.h"
 #include "src/base/platform/platform.h"
 #include "src/debug/liveedit.h"
 #include "src/execution.h"
 #include "src/factory.h"
 #include "src/flags.h"
 #include "src/frames.h"
-#include "src/hashmap.h"
 #include "src/interpreter/source-position-table.h"
 #include "src/runtime/runtime.h"
 #include "src/string-stream.h"
@@ -38,9 +38,10 @@
   StepNext = 1,   // Step to the next statement in the current function.
   StepIn = 2,     // Step into new functions invoked or the next statement
                   // in the current function.
-  StepFrame = 3   // Step into a new frame or return to previous frame.
-};
+  StepFrame = 3,  // Step into a new frame or return to previous frame.
 
+  LastStepAction = StepFrame
+};
 
 // Type of exception break. NOTE: These values are in macros.py as well.
 enum ExceptionBreakType {
@@ -305,6 +306,8 @@
   virtual v8::Local<v8::Context> GetEventContext() const;
   virtual v8::Local<v8::Value> GetCallbackData() const;
   virtual v8::Debug::ClientData* GetClientData() const;
+  virtual v8::Isolate* GetIsolate() const;
+
  private:
   DebugEvent event_;  // Debug event causing the break.
   Handle<JSObject> exec_state_;         // Current execution state.
@@ -456,13 +459,15 @@
   // Stepping handling.
   void PrepareStep(StepAction step_action);
   void PrepareStepIn(Handle<JSFunction> function);
+  void PrepareStepInSuspendedGenerator();
   void PrepareStepOnThrow();
   void ClearStepping();
   void ClearStepOut();
-  void EnableStepIn();
 
   bool PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared);
 
+  void RecordAsyncFunction(Handle<JSGeneratorObject> generator_object);
+
   // Returns whether the operation succeeded. Compilation can only be triggered
   // if a valid closure is passed as the second argument, otherwise the shared
   // function needs to be compiled already.
@@ -497,6 +502,7 @@
   char* RestoreDebug(char* from);
   static int ArchiveSpacePerThread();
   void FreeThreadResources() { }
+  void Iterate(ObjectVisitor* v);
 
   bool CheckExecutionState(int id) {
     return is_active() && !debug_context().is_null() && break_id() != 0 &&
@@ -540,8 +546,12 @@
     return reinterpret_cast<Address>(&after_break_target_);
   }
 
-  Address step_in_enabled_address() {
-    return reinterpret_cast<Address>(&thread_local_.step_in_enabled_);
+  Address last_step_action_address() {
+    return reinterpret_cast<Address>(&thread_local_.last_step_action_);
+  }
+
+  Address suspended_generator_address() {
+    return reinterpret_cast<Address>(&thread_local_.suspended_generator_);
   }
 
   StepAction last_step_action() { return thread_local_.last_step_action_; }
@@ -564,6 +574,14 @@
     return break_disabled_ || in_debug_event_listener_;
   }
 
+  void clear_suspended_generator() {
+    thread_local_.suspended_generator_ = Smi::FromInt(0);
+  }
+
+  bool has_suspended_generator() const {
+    return thread_local_.suspended_generator_ != Smi::FromInt(0);
+  }
+
   void OnException(Handle<Object> exception, Handle<Object> promise);
 
   // Constructors for debug event objects.
@@ -675,11 +693,6 @@
     // Frame pointer of the target frame we want to arrive at.
     Address target_fp_;
 
-    // Whether functions are flooded on entry for step-in and step-frame.
-    // If we stepped out to the embedder, disable flooding to spill stepping
-    // to the next call that the embedder makes.
-    bool step_in_enabled_;
-
     // Stores the way how LiveEdit has patched the stack. It is used when
     // debugger returns control back to user script.
     LiveEdit::FrameDropMode frame_drop_mode_;
@@ -687,6 +700,8 @@
     // Value of accumulator in interpreter frames. In non-interpreter frames
     // this value will be the hole.
     Handle<Object> return_value_;
+
+    Object* suspended_generator_;
   };
 
   // Storage location for registers when handling debug break calls
diff --git a/src/debug/debug.js b/src/debug/debug.js
index 38934b0..cbd4dd2 100644
--- a/src/debug/debug.js
+++ b/src/debug/debug.js
@@ -361,7 +361,7 @@
   } else {
     // We might want to account columns here as well.
     if (!(script.line_offset <= this.line_  &&
-          this.line_ < script.line_offset + script.lineCount())) {
+          this.line_ < script.line_offset + %ScriptLineCount(script))) {
       return false;
     }
     if (this.type_ == Debug.ScriptBreakPointType.ScriptName) {
@@ -383,11 +383,11 @@
   // first piece of breakable code on the line try to find the column on the
   // line which contains some source.
   if (IS_UNDEFINED(column)) {
-    var source_line = script.sourceLine(this.line());
+    var source_line = %ScriptSourceLine(script, line || script.line_offset);
 
     // Allocate array for caching the columns where the actual source starts.
     if (!script.sourceColumnStart_) {
-      script.sourceColumnStart_ = new GlobalArray(script.lineCount());
+      script.sourceColumnStart_ = new GlobalArray(%ScriptLineCount(script));
     }
 
     // Fill cache if needed and get column where the actual source starts.
@@ -536,14 +536,14 @@
 Debug.findFunctionSourceLocation = function(func, opt_line, opt_column) {
   var script = %FunctionGetScript(func);
   var script_offset = %FunctionGetScriptSourcePosition(func);
-  return script.locationFromLine(opt_line, opt_column, script_offset);
+  return %ScriptLocationFromLine(script, opt_line, opt_column, script_offset);
 };
 
 
 // Returns the character position in a script based on a line number and an
 // optional position within that line.
 Debug.findScriptSourcePosition = function(script, opt_line, opt_column) {
-  var location = script.locationFromLine(opt_line, opt_column);
+  var location = %ScriptLocationFromLine(script, opt_line, opt_column, 0);
   return location ? location.position : null;
 };
 
@@ -2085,18 +2085,34 @@
     return response.failed('No source');
   }
 
-  // Get the source slice and fill it into the response.
-  var slice = script.sourceSlice(from_line, to_line);
-  if (!slice) {
+  var raw_script = script.value();
+
+  // Sanitize arguments and remove line offset.
+  var line_offset = raw_script.line_offset;
+  var line_count = %ScriptLineCount(raw_script);
+  from_line = IS_UNDEFINED(from_line) ? 0 : from_line - line_offset;
+  to_line = IS_UNDEFINED(to_line) ? line_count : to_line - line_offset;
+
+  if (from_line < 0) from_line = 0;
+  if (to_line > line_count) to_line = line_count;
+
+  if (from_line >= line_count || to_line < 0 || from_line > to_line) {
     return response.failed('Invalid line interval');
   }
+
+  // Fill in the response.
+
   response.body = {};
-  response.body.source = slice.sourceText();
-  response.body.fromLine = slice.from_line;
-  response.body.toLine = slice.to_line;
-  response.body.fromPosition = slice.from_position;
-  response.body.toPosition = slice.to_position;
-  response.body.totalLines = script.lineCount();
+  response.body.fromLine = from_line + line_offset;
+  response.body.toLine = to_line + line_offset;
+  response.body.fromPosition = %ScriptLineStartPosition(raw_script, from_line);
+  response.body.toPosition =
+    (to_line == 0) ? 0 : %ScriptLineEndPosition(raw_script, to_line - 1);
+  response.body.totalLines = %ScriptLineCount(raw_script);
+
+  response.body.source = %_SubString(raw_script.source,
+                                     response.body.fromPosition,
+                                     response.body.toPosition);
 };
 
 
diff --git a/src/debug/ia32/debug-ia32.cc b/src/debug/ia32/debug-ia32.cc
index 056407f..8e4dee7 100644
--- a/src/debug/ia32/debug-ia32.cc
+++ b/src/debug/ia32/debug-ia32.cc
@@ -38,7 +38,7 @@
 
 void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
                                        Handle<Code> code) {
-  DCHECK_EQ(Code::BUILTIN, code->kind());
+  DCHECK(code->is_debug_stub());
   static const int kSize = Assembler::kDebugBreakSlotLength;
   CodePatcher patcher(isolate, pc, kSize);
 
diff --git a/src/debug/liveedit.cc b/src/debug/liveedit.cc
index 50d60a1..aa67537 100644
--- a/src/debug/liveedit.cc
+++ b/src/debug/liveedit.cc
@@ -621,11 +621,9 @@
   this->SetSmiValueField(kParentIndexOffset_, parent_index);
 }
 
-
-void FunctionInfoWrapper::SetFunctionCode(Handle<Code> function_code,
+void FunctionInfoWrapper::SetFunctionCode(Handle<AbstractCode> function_code,
                                           Handle<HeapObject> code_scope_info) {
   // CompileForLiveEdit must deliver full-codegen code.
-  DCHECK(function_code->kind() == Code::FUNCTION);
   Handle<JSValue> code_wrapper = WrapInJSValue(function_code);
   this->SetField(kCodeOffset_, code_wrapper);
 
@@ -640,27 +638,25 @@
   this->SetField(kSharedFunctionInfoOffset_, info_holder);
 }
 
-
-Handle<Code> FunctionInfoWrapper::GetFunctionCode() {
+Handle<AbstractCode> FunctionInfoWrapper::GetFunctionCode() {
   Handle<Object> element = this->GetField(kCodeOffset_);
   Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element);
   Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
-  CHECK(raw_result->IsCode());
-  return Handle<Code>::cast(raw_result);
+  CHECK(raw_result->IsAbstractCode());
+  return Handle<AbstractCode>::cast(raw_result);
 }
 
-
-MaybeHandle<TypeFeedbackVector> FunctionInfoWrapper::GetFeedbackVector() {
+MaybeHandle<TypeFeedbackMetadata> FunctionInfoWrapper::GetFeedbackMetadata() {
   Handle<Object> element = this->GetField(kSharedFunctionInfoOffset_);
   if (element->IsJSValue()) {
     Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element);
     Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
     Handle<SharedFunctionInfo> shared =
         Handle<SharedFunctionInfo>::cast(raw_result);
-    return Handle<TypeFeedbackVector>(shared->feedback_vector(), isolate());
+    return Handle<TypeFeedbackMetadata>(shared->feedback_metadata(), isolate());
   } else {
     // Scripts may never have a SharedFunctionInfo created.
-    return MaybeHandle<TypeFeedbackVector>();
+    return MaybeHandle<TypeFeedbackMetadata>();
   }
 }
 
@@ -863,11 +859,11 @@
  public:
   static void PatchLiterals(FunctionInfoWrapper* compile_info_wrapper,
                             Handle<SharedFunctionInfo> shared_info,
-                            Isolate* isolate) {
+                            bool feedback_metadata_changed, Isolate* isolate) {
     int new_literal_count = compile_info_wrapper->GetLiteralCount();
     int old_literal_count = shared_info->num_literals();
 
-    if (old_literal_count == new_literal_count) {
+    if (old_literal_count == new_literal_count && !feedback_metadata_changed) {
       // If literal count didn't change, simply go over all functions
       // and clear literal arrays.
       ClearValuesVisitor visitor;
@@ -878,10 +874,13 @@
       // collect all functions and fix their literal arrays.
       Handle<FixedArray> function_instances =
           CollectJSFunctions(shared_info, isolate);
-      Handle<TypeFeedbackVector> vector(shared_info->feedback_vector());
+      Handle<TypeFeedbackMetadata> feedback_metadata(
+          shared_info->feedback_metadata());
 
       for (int i = 0; i < function_instances->length(); i++) {
         Handle<JSFunction> fun(JSFunction::cast(function_instances->get(i)));
+        Handle<TypeFeedbackVector> vector =
+            TypeFeedbackVector::New(isolate, feedback_metadata);
         Handle<LiteralsArray> new_literals =
             LiteralsArray::New(isolate, vector, new_literal_count, TENURED);
         fun->set_literals(*new_literals);
@@ -929,10 +928,10 @@
   class ClearValuesVisitor {
    public:
     void visit(JSFunction* fun) {
-      FixedArray* literals = fun->literals();
-      int len = literals->length();
+      LiteralsArray* literals = fun->literals();
+      int len = literals->literals_count();
       for (int j = 0; j < len; j++) {
-        literals->set_undefined(j);
+        literals->set_literal_undefined(j);
       }
     }
   };
@@ -1007,18 +1006,20 @@
   SharedInfoWrapper shared_info_wrapper(shared_info_array);
 
   Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
+  bool feedback_metadata_changed = false;
 
   if (shared_info->is_compiled()) {
-    Handle<Code> new_code = compile_info_wrapper.GetFunctionCode();
-    Handle<Code> old_code(shared_info->code());
+    Handle<AbstractCode> new_code = compile_info_wrapper.GetFunctionCode();
     if (shared_info->HasBytecodeArray()) {
-      // The old code is interpreted. If we clear the bytecode array, the
-      // interpreter entry trampoline will self-heal and go to compiled code.
+      DCHECK(new_code->IsBytecodeArray());
+      // The old code is interpreted, the new code must be interpreted as well.
       shared_info->ClearBytecodeArray();
-      shared_info->ReplaceCode(*new_code);
+      shared_info->set_bytecode_array(BytecodeArray::cast(*new_code));
     } else {
+      Handle<Code> old_code(shared_info->code());
       DCHECK(old_code->kind() == Code::FUNCTION);
-      ReplaceCodeObject(old_code, new_code);
+      DCHECK(new_code->kind() == AbstractCode::FUNCTION);
+      ReplaceCodeObject(old_code, Handle<Code>::cast(new_code));
     }
     if (shared_info->HasDebugInfo()) {
       // Existing break points will be re-applied. Reset the debug info here.
@@ -1031,10 +1032,14 @@
     }
     shared_info->DisableOptimization(kLiveEdit);
     // Update the type feedback vector, if needed.
-    MaybeHandle<TypeFeedbackVector> feedback_vector =
-        compile_info_wrapper.GetFeedbackVector();
-    if (!feedback_vector.is_null()) {
-      shared_info->set_feedback_vector(*feedback_vector.ToHandleChecked());
+    MaybeHandle<TypeFeedbackMetadata> feedback_metadata =
+        compile_info_wrapper.GetFeedbackMetadata();
+    if (!feedback_metadata.is_null()) {
+      Handle<TypeFeedbackMetadata> checked_feedback_metadata =
+          feedback_metadata.ToHandleChecked();
+      feedback_metadata_changed = checked_feedback_metadata->DiffersFrom(
+          shared_info->feedback_metadata());
+      shared_info->set_feedback_metadata(*checked_feedback_metadata);
     }
   }
 
@@ -1043,7 +1048,8 @@
   shared_info->set_start_position(start_position);
   shared_info->set_end_position(end_position);
 
-  LiteralFixer::PatchLiterals(&compile_info_wrapper, shared_info, isolate);
+  LiteralFixer::PatchLiterals(&compile_info_wrapper, shared_info,
+                              feedback_metadata_changed, isolate);
 
   DeoptimizeDependentFunctions(*shared_info);
   isolate->compilation_cache()->Remove(shared_info);
@@ -1063,7 +1069,8 @@
                                  Handle<Object> script_handle) {
   Handle<SharedFunctionInfo> shared_info =
       UnwrapSharedFunctionInfoFromJSValue(function_wrapper);
-  CHECK(script_handle->IsScript() || script_handle->IsUndefined());
+  Isolate* isolate = function_wrapper->GetIsolate();
+  CHECK(script_handle->IsScript() || script_handle->IsUndefined(isolate));
   SharedFunctionInfo::SetScript(shared_info, script_handle);
   shared_info->DisableOptimization(kLiveEdit);
 
@@ -1219,18 +1226,13 @@
   }
 
   Vector<byte> buffer = buffer_writer.GetResult();
+  Handle<ByteArray> reloc_info =
+      isolate->factory()->NewByteArray(buffer.length(), TENURED);
 
-  if (buffer.length() == code->relocation_size()) {
-    // Simply patch relocation area of code.
-    MemCopy(code->relocation_start(), buffer.start(), buffer.length());
-    return code;
-  } else {
-    // Relocation info section now has different size. We cannot simply
-    // rewrite it inside code object. Instead we have to create a new
-    // code object.
-    Handle<Code> result(isolate->factory()->CopyCode(code, buffer));
-    return result;
-  }
+  DisallowHeapAllocation no_gc;
+  code->set_relocation_info(*reloc_info);
+  CopyBytes(code->relocation_start(), buffer.start(), buffer.length());
+  return code;
 }
 
 void PatchPositionsInBytecodeArray(Handle<BytecodeArray> bytecode,
@@ -1248,7 +1250,8 @@
                         iterator.is_statement());
   }
 
-  bytecode->set_source_position_table(*builder.ToSourcePositionTable());
+  Handle<ByteArray> source_position_table = builder.ToSourcePositionTable();
+  bytecode->set_source_position_table(*source_position_table);
 }
 }  // namespace
 
@@ -1601,7 +1604,7 @@
       Handle<Object> new_element =
           JSReceiver::GetElement(isolate, new_shared_array_, i)
               .ToHandleChecked();
-      if (new_element->IsUndefined()) return false;
+      if (new_element->IsUndefined(isolate)) return false;
       Handle<SharedFunctionInfo> new_shared =
           UnwrapSharedFunctionInfoFromJSValue(
               Handle<JSValue>::cast(new_element));
@@ -1619,6 +1622,21 @@
     return false;
   }
 
+  void set_status(LiveEdit::FunctionPatchabilityStatus status) {
+    Isolate* isolate = old_shared_array_->GetIsolate();
+    int len = GetArrayLength(old_shared_array_);
+    for (int i = 0; i < len; ++i) {
+      Handle<Object> old_element =
+          JSReceiver::GetElement(isolate, result_, i).ToHandleChecked();
+      if (!old_element->IsSmi() ||
+          Smi::cast(*old_element)->value() ==
+              LiveEdit::FUNCTION_AVAILABLE_FOR_PATCH) {
+        SetElementSloppy(result_, i,
+                         Handle<Smi>(Smi::FromInt(status), isolate));
+      }
+    }
+  }
+
  private:
   Handle<JSArray> old_shared_array_;
   Handle<JSArray> new_shared_array_;
@@ -1672,7 +1690,7 @@
     if (frame->is_java_script()) {
       SharedFunctionInfo* shared =
           JavaScriptFrame::cast(frame)->function()->shared();
-      if (shared->is_generator()) {
+      if (shared->is_resumable()) {
         non_droppable_frame_found = true;
         non_droppable_reason = LiveEdit::FUNCTION_BLOCKED_UNDER_GENERATOR;
         break;
@@ -1696,6 +1714,13 @@
           // Fail.
           return NULL;
         }
+        if (non_droppable_reason ==
+                LiveEdit::FUNCTION_BLOCKED_UNDER_GENERATOR &&
+            !target_frame_found) {
+          // Fail.
+          target.set_status(non_droppable_reason);
+          return NULL;
+        }
       }
     }
   }
@@ -1993,7 +2018,7 @@
   FunctionInfoWrapper info = FunctionInfoWrapper::cast(
       *JSReceiver::GetElement(isolate_, result_, current_parent_index_)
            .ToHandleChecked());
-  info.SetFunctionCode(Handle<Code>(shared->code()),
+  info.SetFunctionCode(Handle<AbstractCode>(shared->abstract_code()),
                        Handle<HeapObject>(shared->scope_info()));
   info.SetSharedFunctionInfo(shared);
 
diff --git a/src/debug/liveedit.h b/src/debug/liveedit.h
index af74043..32328d9 100644
--- a/src/debug/liveedit.h
+++ b/src/debug/liveedit.h
@@ -292,7 +292,7 @@
                             int end_position, int param_num, int literal_count,
                             int parent_index);
 
-  void SetFunctionCode(Handle<Code> function_code,
+  void SetFunctionCode(Handle<AbstractCode> function_code,
                        Handle<HeapObject> code_scope_info);
 
   void SetFunctionScopeInfo(Handle<Object> scope_info_array) {
@@ -309,9 +309,9 @@
     return this->GetSmiValueField(kParentIndexOffset_);
   }
 
-  Handle<Code> GetFunctionCode();
+  Handle<AbstractCode> GetFunctionCode();
 
-  MaybeHandle<TypeFeedbackVector> GetFeedbackVector();
+  MaybeHandle<TypeFeedbackMetadata> GetFeedbackMetadata();
 
   Handle<Object> GetCodeScopeInfo();
 
diff --git a/src/debug/mips/debug-mips.cc b/src/debug/mips/debug-mips.cc
index 8e00d61..49320d8 100644
--- a/src/debug/mips/debug-mips.cc
+++ b/src/debug/mips/debug-mips.cc
@@ -41,7 +41,7 @@
 
 void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
                                        Handle<Code> code) {
-  DCHECK_EQ(Code::BUILTIN, code->kind());
+  DCHECK(code->is_debug_stub());
   CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
   // Patch the code changing the debug break slot code from:
   //   nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
diff --git a/src/debug/mips64/debug-mips64.cc b/src/debug/mips64/debug-mips64.cc
index aad095b..2e967d7 100644
--- a/src/debug/mips64/debug-mips64.cc
+++ b/src/debug/mips64/debug-mips64.cc
@@ -40,7 +40,7 @@
 
 void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
                                        Handle<Code> code) {
-  DCHECK_EQ(Code::BUILTIN, code->kind());
+  DCHECK(code->is_debug_stub());
   CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
   // Patch the code changing the debug break slot code from:
   //   nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
diff --git a/src/debug/mirrors.js b/src/debug/mirrors.js
index d098c1c..6f839fe 100644
--- a/src/debug/mirrors.js
+++ b/src/debug/mirrors.js
@@ -1956,7 +1956,7 @@
 FrameMirror.prototype.sourceLineText = function() {
   var location = this.sourceLocation();
   if (location) {
-    return location.sourceText();
+    return location.sourceText;
   }
 };
 
@@ -2362,7 +2362,7 @@
 
 
 ScriptMirror.prototype.lineCount = function() {
-  return this.script_.lineCount();
+  return %ScriptLineCount(this.script_);
 };
 
 
@@ -2372,11 +2372,6 @@
 };
 
 
-ScriptMirror.prototype.sourceSlice = function (opt_from_line, opt_to_line) {
-  return this.script_.sourceSlice(opt_from_line, opt_to_line);
-};
-
-
 ScriptMirror.prototype.context = function() {
   return this.context_;
 };
diff --git a/src/debug/ppc/OWNERS b/src/debug/ppc/OWNERS
index eb007cb..752e8e3 100644
--- a/src/debug/ppc/OWNERS
+++ b/src/debug/ppc/OWNERS
@@ -3,3 +3,4 @@
 joransiu@ca.ibm.com
 mbrandy@us.ibm.com
 michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/src/debug/ppc/debug-ppc.cc b/src/debug/ppc/debug-ppc.cc
index a160bc2..7facf95 100644
--- a/src/debug/ppc/debug-ppc.cc
+++ b/src/debug/ppc/debug-ppc.cc
@@ -41,7 +41,7 @@
 
 void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
                                        Handle<Code> code) {
-  DCHECK_EQ(Code::BUILTIN, code->kind());
+  DCHECK(code->is_debug_stub());
   CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
   // Patch the code changing the debug break slot code from
   //
diff --git a/src/debug/s390/OWNERS b/src/debug/s390/OWNERS
index eb007cb..752e8e3 100644
--- a/src/debug/s390/OWNERS
+++ b/src/debug/s390/OWNERS
@@ -3,3 +3,4 @@
 joransiu@ca.ibm.com
 mbrandy@us.ibm.com
 michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/src/debug/s390/debug-s390.cc b/src/debug/s390/debug-s390.cc
index c6764c2..9c33b95 100644
--- a/src/debug/s390/debug-s390.cc
+++ b/src/debug/s390/debug-s390.cc
@@ -45,7 +45,7 @@
 
 void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
                                        Handle<Code> code) {
-  DCHECK_EQ(Code::BUILTIN, code->kind());
+  DCHECK(code->is_debug_stub());
   CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotLength);
   // Patch the code changing the debug break slot code from
   //
diff --git a/src/debug/x64/debug-x64.cc b/src/debug/x64/debug-x64.cc
index a85ddb3..910d1ca 100644
--- a/src/debug/x64/debug-x64.cc
+++ b/src/debug/x64/debug-x64.cc
@@ -39,7 +39,7 @@
 
 void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
                                        Handle<Code> code) {
-  DCHECK_EQ(Code::BUILTIN, code->kind());
+  DCHECK(code->is_debug_stub());
   static const int kSize = Assembler::kDebugBreakSlotLength;
   CodePatcher patcher(isolate, pc, kSize);
   Label check_codesize;
diff --git a/src/debug/x87/debug-x87.cc b/src/debug/x87/debug-x87.cc
index 029a004..1cbdf45 100644
--- a/src/debug/x87/debug-x87.cc
+++ b/src/debug/x87/debug-x87.cc
@@ -38,7 +38,7 @@
 
 void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
                                        Handle<Code> code) {
-  DCHECK_EQ(Code::BUILTIN, code->kind());
+  DCHECK(code->is_debug_stub());
   static const int kSize = Assembler::kDebugBreakSlotLength;
   CodePatcher patcher(isolate, pc, kSize);
 
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index adf4cf1..4cf41a9 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -13,7 +13,6 @@
 #include "src/global-handles.h"
 #include "src/interpreter/interpreter.h"
 #include "src/macro-assembler.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/tracing/trace-event.h"
 #include "src/v8.h"
 
@@ -56,9 +55,10 @@
 Code* Deoptimizer::FindDeoptimizingCode(Address addr) {
   if (function_->IsHeapObject()) {
     // Search all deoptimizing code in the native context of the function.
+    Isolate* isolate = function_->GetIsolate();
     Context* native_context = function_->context()->native_context();
     Object* element = native_context->DeoptimizedCodeListHead();
-    while (!element->IsUndefined()) {
+    while (!element->IsUndefined(isolate)) {
       Code* code = Code::cast(element);
       CHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
       if (code->contains(addr)) return code;
@@ -186,7 +186,8 @@
   // no longer refer to optimized code.
   JSFunction* prev = NULL;
   Object* element = context->OptimizedFunctionsListHead();
-  while (!element->IsUndefined()) {
+  Isolate* isolate = context->GetIsolate();
+  while (!element->IsUndefined(isolate)) {
     JSFunction* function = JSFunction::cast(element);
     Object* next = function->next_function_link();
     if (function->code()->kind() != Code::OPTIMIZED_FUNCTION ||
@@ -226,7 +227,7 @@
 
   // Run through the list of all native contexts.
   Object* context = isolate->heap()->native_contexts_list();
-  while (!context->IsUndefined()) {
+  while (!context->IsUndefined(isolate)) {
     VisitAllOptimizedFunctionsForContext(Context::cast(context), visitor);
     context = Context::cast(context)->next_context_link();
   }
@@ -315,7 +316,7 @@
   // Walk over all optimized code objects in this native context.
   Code* prev = NULL;
   Object* element = context->OptimizedCodeListHead();
-  while (!element->IsUndefined()) {
+  while (!element->IsUndefined(isolate)) {
     Code* code = Code::cast(element);
     CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
     Object* next = code->next_code_link();
@@ -385,7 +386,7 @@
   DisallowHeapAllocation no_allocation;
   // For all contexts, mark all code, then deoptimize.
   Object* context = isolate->heap()->native_contexts_list();
-  while (!context->IsUndefined()) {
+  while (!context->IsUndefined(isolate)) {
     Context* native_context = Context::cast(context);
     MarkAllCodeForContext(native_context);
     DeoptimizeMarkedCodeForContext(native_context);
@@ -406,7 +407,7 @@
   DisallowHeapAllocation no_allocation;
   // For all contexts, deoptimize code already marked.
   Object* context = isolate->heap()->native_contexts_list();
-  while (!context->IsUndefined()) {
+  while (!context->IsUndefined(isolate)) {
     Context* native_context = Context::cast(context);
     DeoptimizeMarkedCodeForContext(native_context);
     context = native_context->next_context_link();
@@ -416,7 +417,8 @@
 
 void Deoptimizer::MarkAllCodeForContext(Context* context) {
   Object* element = context->OptimizedCodeListHead();
-  while (!element->IsUndefined()) {
+  Isolate* isolate = context->GetIsolate();
+  while (!element->IsUndefined(isolate)) {
     Code* code = Code::cast(element);
     CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
     code->set_marked_for_deoptimization(true);
@@ -565,7 +567,7 @@
 
 
 void Deoptimizer::PrintFunctionName() {
-  if (function_->IsJSFunction()) {
+  if (function_ != nullptr && function_->IsJSFunction()) {
     function_->ShortPrint(trace_scope_->file());
   } else {
     PrintF(trace_scope_->file(),
@@ -660,10 +662,10 @@
   int length = 0;
   // Count all entries in the deoptimizing code list of every context.
   Object* context = isolate->heap()->native_contexts_list();
-  while (!context->IsUndefined()) {
+  while (!context->IsUndefined(isolate)) {
     Context* native_context = Context::cast(context);
     Object* element = native_context->DeoptimizedCodeListHead();
-    while (!element->IsUndefined()) {
+    while (!element->IsUndefined(isolate)) {
       Code* code = Code::cast(element);
       DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
       length++;
@@ -989,7 +991,7 @@
   }
   // Read the context from the translations.
   Object* context = context_pos->GetRawValue();
-  if (context == isolate_->heap()->undefined_value()) {
+  if (context->IsUndefined(isolate_)) {
     // If the context was optimized away, just use the context from
     // the activation. This should only apply to Crankshaft code.
     CHECK(!compiled_code_->is_turbofanned());
@@ -2451,6 +2453,10 @@
   buffer_->Add(reg.code(), zone());
 }
 
+void Translation::StoreFloatRegister(FloatRegister reg) {
+  buffer_->Add(FLOAT_REGISTER, zone());
+  buffer_->Add(reg.code(), zone());
+}
 
 void Translation::StoreDoubleRegister(DoubleRegister reg) {
   buffer_->Add(DOUBLE_REGISTER, zone());
@@ -2481,6 +2487,10 @@
   buffer_->Add(index, zone());
 }
 
+void Translation::StoreFloatStackSlot(int index) {
+  buffer_->Add(FLOAT_STACK_SLOT, zone());
+  buffer_->Add(index, zone());
+}
 
 void Translation::StoreDoubleStackSlot(int index) {
   buffer_->Add(DOUBLE_STACK_SLOT, zone());
@@ -2521,11 +2531,13 @@
     case INT32_REGISTER:
     case UINT32_REGISTER:
     case BOOL_REGISTER:
+    case FLOAT_REGISTER:
     case DOUBLE_REGISTER:
     case STACK_SLOT:
     case INT32_STACK_SLOT:
     case UINT32_STACK_SLOT:
     case BOOL_STACK_SLOT:
+    case FLOAT_STACK_SLOT:
     case DOUBLE_STACK_SLOT:
     case LITERAL:
     case COMPILED_STUB_FRAME:
@@ -2816,6 +2828,14 @@
 
 
 // static
+TranslatedValue TranslatedValue::NewFloat(TranslatedState* container,
+                                          float value) {
+  TranslatedValue slot(container, kFloat);
+  slot.float_value_ = value;
+  return slot;
+}
+
+// static
 TranslatedValue TranslatedValue::NewDouble(TranslatedState* container,
                                            double value) {
   TranslatedValue slot(container, kDouble);
@@ -2886,6 +2906,10 @@
   return uint32_value_;
 }
 
+float TranslatedValue::float_value() const {
+  DCHECK_EQ(kFloat, kind());
+  return float_value_;
+}
 
 double TranslatedValue::double_value() const {
   DCHECK_EQ(kDouble, kind());
@@ -2964,6 +2988,7 @@
     case TranslatedValue::kInt32:
     case TranslatedValue::kUInt32:
     case TranslatedValue::kBoolBit:
+    case TranslatedValue::kFloat:
     case TranslatedValue::kDouble: {
       MaterializeSimple();
       return value_.ToHandleChecked();
@@ -3005,6 +3030,10 @@
       value_ = Handle<Object>(isolate()->factory()->NewNumber(uint32_value()));
       return;
 
+    case kFloat:
+      value_ = Handle<Object>(isolate()->factory()->NewNumber(float_value()));
+      return;
+
     case kDouble:
       value_ = Handle<Object>(isolate()->factory()->NewNumber(double_value()));
       return;
@@ -3281,11 +3310,13 @@
     case Translation::INT32_REGISTER:
     case Translation::UINT32_REGISTER:
     case Translation::BOOL_REGISTER:
+    case Translation::FLOAT_REGISTER:
     case Translation::DOUBLE_REGISTER:
     case Translation::STACK_SLOT:
     case Translation::INT32_STACK_SLOT:
     case Translation::UINT32_STACK_SLOT:
     case Translation::BOOL_STACK_SLOT:
+    case Translation::FLOAT_STACK_SLOT:
     case Translation::DOUBLE_STACK_SLOT:
     case Translation::LITERAL:
       break;
@@ -3412,13 +3443,26 @@
       return TranslatedValue::NewBool(this, static_cast<uint32_t>(value));
     }
 
+    case Translation::FLOAT_REGISTER: {
+      int input_reg = iterator->Next();
+      if (registers == nullptr) return TranslatedValue::NewInvalid(this);
+      float value = registers->GetFloatRegister(input_reg);
+      if (trace_file != nullptr) {
+        PrintF(trace_file, "%e ; %s (float)", value,
+               RegisterConfiguration::Crankshaft()->GetFloatRegisterName(
+                   input_reg));
+      }
+      return TranslatedValue::NewFloat(this, value);
+    }
+
     case Translation::DOUBLE_REGISTER: {
       int input_reg = iterator->Next();
       if (registers == nullptr) return TranslatedValue::NewInvalid(this);
       double value = registers->GetDoubleRegister(input_reg);
       if (trace_file != nullptr) {
-        PrintF(trace_file, "%e ; %s (bool)", value,
-               DoubleRegister::from_code(input_reg).ToString());
+        PrintF(trace_file, "%e ; %s (double)", value,
+               RegisterConfiguration::Crankshaft()->GetDoubleRegisterName(
+                   input_reg));
       }
       return TranslatedValue::NewDouble(this, value);
     }
@@ -3469,6 +3513,17 @@
       return TranslatedValue::NewBool(this, value);
     }
 
+    case Translation::FLOAT_STACK_SLOT: {
+      int slot_offset =
+          OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
+      float value = ReadFloatValue(fp + slot_offset);
+      if (trace_file != nullptr) {
+        PrintF(trace_file, "%e ; (float) [fp %c %d] ", value,
+               slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
+      }
+      return TranslatedValue::NewFloat(this, value);
+    }
+
     case Translation::DOUBLE_STACK_SLOT: {
       int slot_offset =
           OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
@@ -3616,6 +3671,7 @@
     case TranslatedValue::kInt32:
     case TranslatedValue::kUInt32:
     case TranslatedValue::kBoolBit:
+    case TranslatedValue::kFloat:
     case TranslatedValue::kDouble: {
       slot->MaterializeSimple();
       Handle<Object> value = slot->GetValue();
@@ -3684,7 +3740,9 @@
           }
           return object;
         }
-        case JS_OBJECT_TYPE: {
+        case JS_OBJECT_TYPE:
+        case JS_ERROR_TYPE:
+        case JS_ARGUMENTS_TYPE: {
           Handle<JSObject> object =
               isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED);
           slot->value_ = object;
@@ -3711,6 +3769,35 @@
           object->set_length(*length);
           return object;
         }
+        case JS_FUNCTION_TYPE: {
+          Handle<JSFunction> object =
+              isolate_->factory()->NewFunctionFromSharedFunctionInfo(
+                  handle(isolate_->object_function()->shared()),
+                  handle(isolate_->context()));
+          slot->value_ = object;
+          // We temporarily allocated a JSFunction for the {Object} function
+          // within the current context, to break cycles in the object graph.
+          // The correct function and context will be set below once available.
+          Handle<Object> properties = MaterializeAt(frame_index, value_index);
+          Handle<Object> elements = MaterializeAt(frame_index, value_index);
+          Handle<Object> prototype = MaterializeAt(frame_index, value_index);
+          Handle<Object> shared = MaterializeAt(frame_index, value_index);
+          Handle<Object> context = MaterializeAt(frame_index, value_index);
+          Handle<Object> literals = MaterializeAt(frame_index, value_index);
+          Handle<Object> entry = MaterializeAt(frame_index, value_index);
+          Handle<Object> next_link = MaterializeAt(frame_index, value_index);
+          object->ReplaceCode(*isolate_->builtins()->CompileLazy());
+          object->set_map(*map);
+          object->set_properties(FixedArray::cast(*properties));
+          object->set_elements(FixedArrayBase::cast(*elements));
+          object->set_prototype_or_initial_map(*prototype);
+          object->set_shared(SharedFunctionInfo::cast(*shared));
+          object->set_context(Context::cast(*context));
+          object->set_literals(LiteralsArray::cast(*literals));
+          CHECK(entry->IsNumber());  // Entry to compile lazy stub.
+          CHECK(next_link->IsUndefined(isolate_));
+          return object;
+        }
         case FIXED_ARRAY_TYPE: {
           Handle<Object> lengthObject = MaterializeAt(frame_index, value_index);
           int32_t length = 0;
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 1d413e6..db20406 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -39,6 +39,7 @@
     kInt32,
     kUInt32,
     kBoolBit,
+    kFloat,
     kDouble,
     kCapturedObject,    // Object captured by the escape analysis.
                         // The number of nested objects can be obtained
@@ -61,6 +62,7 @@
   static TranslatedValue NewDeferredObject(TranslatedState* container,
                                            int length, int object_index);
   static TranslatedValue NewDuplicateObject(TranslatedState* container, int id);
+  static TranslatedValue NewFloat(TranslatedState* container, float value);
   static TranslatedValue NewDouble(TranslatedState* container, double value);
   static TranslatedValue NewInt32(TranslatedState* container, int32_t value);
   static TranslatedValue NewUInt32(TranslatedState* container, uint32_t value);
@@ -93,6 +95,8 @@
     uint32_t uint32_value_;
     // kind is kInt32.
     int32_t int32_value_;
+    // kind is kFloat
+    float float_value_;
     // kind is kDouble
     double double_value_;
     // kind is kDuplicatedObject or kArgumentsObject or kCapturedObject.
@@ -103,6 +107,7 @@
   Object* raw_literal() const;
   int32_t int32_value() const;
   uint32_t uint32_value() const;
+  float float_value() const;
   double double_value() const;
   int object_length() const;
   int object_index() const;
@@ -334,7 +339,7 @@
   V(kInstanceMigrationFailed, "instance migration failed")                     \
   V(kInsufficientTypeFeedbackForCallWithArguments,                             \
     "Insufficient type feedback for call with arguments")                      \
-  V(kFastArrayPushFailed, "Falling off the fast path for FastArrayPush")       \
+  V(kFastPathFailed, "Falling off the fast path")                              \
   V(kInsufficientTypeFeedbackForCombinedTypeOfBinaryOperation,                 \
     "Insufficient type feedback for combined type of binary operation")        \
   V(kInsufficientTypeFeedbackForGenericNamedAccess,                            \
@@ -738,6 +743,11 @@
     return registers_[n];
   }
 
+  float GetFloatRegister(unsigned n) const {
+    DCHECK(n < arraysize(float_registers_));
+    return float_registers_[n];
+  }
+
   double GetDoubleRegister(unsigned n) const {
     DCHECK(n < arraysize(double_registers_));
     return double_registers_[n];
@@ -748,12 +758,18 @@
     registers_[n] = value;
   }
 
+  void SetFloatRegister(unsigned n, float value) {
+    DCHECK(n < arraysize(float_registers_));
+    float_registers_[n] = value;
+  }
+
   void SetDoubleRegister(unsigned n, double value) {
     DCHECK(n < arraysize(double_registers_));
     double_registers_[n] = value;
   }
 
   intptr_t registers_[Register::kNumRegisters];
+  float float_registers_[FloatRegister::kMaxNumRegisters];
   double double_registers_[DoubleRegister::kMaxNumRegisters];
 };
 
@@ -977,11 +993,13 @@
   V(INT32_REGISTER)                \
   V(UINT32_REGISTER)               \
   V(BOOL_REGISTER)                 \
+  V(FLOAT_REGISTER)                \
   V(DOUBLE_REGISTER)               \
   V(STACK_SLOT)                    \
   V(INT32_STACK_SLOT)              \
   V(UINT32_STACK_SLOT)             \
   V(BOOL_STACK_SLOT)               \
+  V(FLOAT_STACK_SLOT)              \
   V(DOUBLE_STACK_SLOT)             \
   V(LITERAL)
 
@@ -1023,11 +1041,13 @@
   void StoreInt32Register(Register reg);
   void StoreUint32Register(Register reg);
   void StoreBoolRegister(Register reg);
+  void StoreFloatRegister(FloatRegister reg);
   void StoreDoubleRegister(DoubleRegister reg);
   void StoreStackSlot(int index);
   void StoreInt32StackSlot(int index);
   void StoreUint32StackSlot(int index);
   void StoreBoolStackSlot(int index);
+  void StoreFloatStackSlot(int index);
   void StoreDoubleStackSlot(int index);
   void StoreLiteral(int literal_id);
   void StoreArgumentsObject(bool args_known, int args_index, int args_length);
diff --git a/src/disassembler.cc b/src/disassembler.cc
index c29022a..8fe1e7f 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -9,6 +9,7 @@
 #include "src/debug/debug.h"
 #include "src/deoptimizer.h"
 #include "src/disasm.h"
+#include "src/ic/ic.h"
 #include "src/macro-assembler.h"
 #include "src/snapshot/serializer-common.h"
 #include "src/string-stream.h"
@@ -36,7 +37,7 @@
       code_ == NULL ? NULL : code_->GetIsolate()->builtins()->Lookup(pc);
 
   if (name != NULL) {
-    SNPrintF(v8_buffer_, "%s  (%p)", name, pc);
+    SNPrintF(v8_buffer_, "%s  (%p)", name, static_cast<void*>(pc));
     return v8_buffer_.start();
   }
 
@@ -44,7 +45,7 @@
     int offs = static_cast<int>(pc - code_->instruction_start());
     // print as code offset, if it seems reasonable
     if (0 <= offs && offs < code_->instruction_size()) {
-      SNPrintF(v8_buffer_, "%d  (%p)", offs, pc);
+      SNPrintF(v8_buffer_, "%d  (%p)", offs, static_cast<void*>(pc));
       return v8_buffer_.start();
     }
   }
@@ -146,7 +147,8 @@
     }
 
     // Instruction address and instruction offset.
-    out.AddFormatted("%p  %4" V8PRIdPTRDIFF "  ", prev_pc, prev_pc - begin);
+    out.AddFormatted("%p  %4" V8PRIdPTRDIFF "  ", static_cast<void*>(prev_pc),
+                     prev_pc - begin);
 
     // Instruction.
     out.AddFormatted("%s", decode_buffer.start());
@@ -199,14 +201,16 @@
         Code* code = Code::GetCodeFromTargetAddress(relocinfo.target_address());
         Code::Kind kind = code->kind();
         if (code->is_inline_cache_stub()) {
-          if (kind == Code::LOAD_IC &&
-              LoadICState::GetTypeofMode(code->extra_ic_state()) ==
-                  NOT_INSIDE_TYPEOF) {
-            out.AddFormatted(" contextual,");
+          if (kind == Code::LOAD_GLOBAL_IC &&
+              LoadGlobalICState::GetTypeofMode(code->extra_ic_state()) ==
+                  INSIDE_TYPEOF) {
+            out.AddFormatted(" inside typeof,");
           }
-          InlineCacheState ic_state = code->ic_state();
-          out.AddFormatted(" %s, %s", Code::Kind2String(kind),
-              Code::ICState2String(ic_state));
+          out.AddFormatted(" %s", Code::Kind2String(kind));
+          if (!IC::ICUseVector(kind)) {
+            InlineCacheState ic_state = IC::StateFromCode(code);
+            out.AddFormatted(" %s", Code::ICState2String(ic_state));
+          }
         } else if (kind == Code::STUB || kind == Code::HANDLER) {
           // Get the STUB key and extract major and minor key.
           uint32_t key = code->stub_key();
diff --git a/src/eh-frame.cc b/src/eh-frame.cc
new file mode 100644
index 0000000..af85e0b
--- /dev/null
+++ b/src/eh-frame.cc
@@ -0,0 +1,96 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/eh-frame.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+static const int DW_EH_PE_pcrel = 0x10;
+static const int DW_EH_PE_datarel = 0x30;
+static const int DW_EH_PE_udata4 = 0x03;
+static const int DW_EH_PE_sdata4 = 0x0b;
+
+const int EhFrameHdr::kCIESize = 0;
+
+static const int kVersionSize = 1;
+static const int kEncodingSpecifiersSize = 3;
+
+//
+// In order to calculate offsets in the .eh_frame_hdr, we must know the layout
+// of the DSO generated by perf inject, which is assumed to be the following:
+//
+//  |      ...      |                        |
+//  +---------------+ <-- (F) ---            |  Larger offsets in file
+//  |               |           ^            |
+//  |  Instructions |           | .text      v
+//  |               |           v
+//  +---------------+ <-- (E) ---
+//  |///////////////|
+//  |////Padding////|
+//  |///////////////|
+//  +---------------+ <-- (D) ---
+//  |               |           ^
+//  |      CIE      |           |
+//  |               |           |
+//  +---------------+ <-- (C)   | .eh_frame
+//  |               |           |
+//  |      FDE      |           |
+//  |               |           v
+//  +---------------+ <-- (B) ---
+//  |    version    |           ^
+//  +---------------+           |
+//  |   encoding    |           |
+//  |  specifiers   |           |
+//  +---------------+ <---(A)   | .eh_frame_hdr
+//  |   offset to   |           |
+//  |   .eh_frame   |           |
+//  +---------------+           |
+//  |      ...      |          ...
+//
+// (F) is aligned at a 16-byte boundary.
+// (D) is aligned at a  8-byte boundary.
+// (B) is aligned at a  4-byte boundary.
+// (E), (C) and (A) have no alignment requirements.
+//
+// The distance between (A) and (B) is 4 bytes.
+//
+// The size of the .eh_frame is required to be a multiple of the pointer size,
+// which means that (B) will be naturally aligned to a 4-byte boundary on all
+// the architectures we support.
+//
+// Because (E) has no alignment requirements, there is padding between (E) and
+// (D). (F) is aligned at a 16-byte boundary, thus to a 8-byte one as well.
+//
+EhFrameHdr::EhFrameHdr(Code* code) {
+  int code_size = code->is_crankshafted() ? code->safepoint_table_offset()
+                                          : code->instruction_size();
+  version_ = 1;
+  eh_frame_ptr_encoding_ = DW_EH_PE_sdata4 | DW_EH_PE_pcrel;
+  lut_size_encoding_ = DW_EH_PE_udata4;
+  lut_entries_encoding_ = DW_EH_PE_sdata4 | DW_EH_PE_datarel;
+
+  // .eh_frame pointer and LUT
+  if (code->has_unwinding_info()) {
+    DCHECK_GE(code->unwinding_info_size(), EhFrameHdr::kRecordSize);
+    int eh_frame_size = code->unwinding_info_size() - EhFrameHdr::kRecordSize;
+
+    offset_to_eh_frame_ =
+        -(eh_frame_size + kVersionSize + kEncodingSpecifiersSize);  // A -> D
+    lut_entries_number_ = 1;
+    offset_to_procedure_ = -(RoundUp(code_size, 8) + eh_frame_size);  // B -> F
+    offset_to_fde_ = -(eh_frame_size - kCIESize);                     // B -> C
+  } else {
+    // Create a dummy table
+    offset_to_eh_frame_ = 0;
+    lut_entries_number_ = 0;
+    offset_to_procedure_ = 0;
+    offset_to_fde_ = 0;
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/eh-frame.h b/src/eh-frame.h
new file mode 100644
index 0000000..75781ac
--- /dev/null
+++ b/src/eh-frame.h
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EH_FRAME_H_
+#define V8_EH_FRAME_H_
+
+#include <cstdint>
+
+namespace v8 {
+namespace internal {
+
+class Code;
+
+class EhFrameHdr final {
+ public:
+  static const int kRecordSize = 20;
+  static const int kCIESize;
+
+  explicit EhFrameHdr(Code* code);
+
+  int32_t offset_to_eh_frame() const { return offset_to_eh_frame_; }
+  uint32_t lut_entries_number() const { return lut_entries_number_; }
+  int32_t offset_to_procedure() const { return offset_to_procedure_; }
+  int32_t offset_to_fde() const { return offset_to_fde_; }
+
+ private:
+  uint8_t version_;
+  uint8_t eh_frame_ptr_encoding_;
+  uint8_t lut_size_encoding_;
+  uint8_t lut_entries_encoding_;
+  int32_t offset_to_eh_frame_;
+  uint32_t lut_entries_number_;
+  int32_t offset_to_procedure_;
+  int32_t offset_to_fde_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif
diff --git a/src/elements.cc b/src/elements.cc
index 6c257ac..d7e49cd 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -189,7 +189,7 @@
     int entry = from->FindEntry(i + from_start);
     if (entry != SeededNumberDictionary::kNotFound) {
       Object* value = from->ValueAt(entry);
-      DCHECK(!value->IsTheHole());
+      DCHECK(!value->IsTheHole(from->GetIsolate()));
       to->set(i + to_start, value, write_barrier_mode);
     } else {
       to->set_the_hole(i + to_start);
@@ -352,7 +352,7 @@
   for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size);
        from_start < from_end; from_start++, to_start++) {
     Object* smi = from->get(from_start);
-    DCHECK(!smi->IsTheHole());
+    DCHECK(!smi->IsTheHole(from->GetIsolate()));
     to->set(to_start, Smi::cast(smi)->value());
   }
 }
@@ -445,6 +445,28 @@
   JavaScriptFrame::PrintTop(isolate, stdout, false, true);
 }
 
+static void SortIndices(
+    Handle<FixedArray> indices, uint32_t sort_size,
+    WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER) {
+  struct {
+    bool operator()(Object* a, Object* b) {
+      if (a->IsSmi() || !a->IsUndefined(HeapObject::cast(a)->GetIsolate())) {
+        if (!b->IsSmi() && b->IsUndefined(HeapObject::cast(b)->GetIsolate())) {
+          return true;
+        }
+        return a->Number() < b->Number();
+      }
+      return !b->IsSmi() && b->IsUndefined(HeapObject::cast(b)->GetIsolate());
+    }
+  } cmp;
+  Object** start =
+      reinterpret_cast<Object**>(indices->GetFirstElementAddress());
+  std::sort(start, start + sort_size, cmp);
+  if (write_barrier_mode != SKIP_WRITE_BARRIER) {
+    FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(indices->GetIsolate()->heap(), *indices,
+                                       0, sort_size);
+  }
+}
 
 // Base class for element handler implementations. Contains the
 // the common logic for objects with different ElementsKinds.
@@ -709,8 +731,7 @@
     JSObject::ValidateElements(array);
   }
 
-  static uint32_t GetIterationLength(JSObject* receiver,
-                                     FixedArrayBase* elements) {
+  static uint32_t GetMaxIndex(JSObject* receiver, FixedArrayBase* elements) {
     if (receiver->IsJSArray()) {
       DCHECK(JSArray::cast(receiver)->length()->IsSmi());
       return static_cast<uint32_t>(
@@ -719,6 +740,11 @@
     return Subclass::GetCapacityImpl(receiver, elements);
   }
 
+  static uint32_t GetMaxNumberOfEntries(JSObject* receiver,
+                                        FixedArrayBase* elements) {
+    return Subclass::GetMaxIndex(receiver, elements);
+  }
+
   static Handle<FixedArrayBase> ConvertElementsWithCapacity(
       Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
       ElementsKind from_kind, uint32_t capacity) {
@@ -862,8 +888,8 @@
       Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
       PropertyFilter filter) {
     int count = 0;
-    KeyAccumulator accumulator(isolate, OWN_ONLY, ALL_PROPERTIES);
-    accumulator.NextPrototype();
+    KeyAccumulator accumulator(isolate, KeyCollectionMode::kOwnOnly,
+                               ALL_PROPERTIES);
     Subclass::CollectElementIndicesImpl(
         object, handle(object->elements(), isolate), &accumulator);
     Handle<FixedArray> keys = accumulator.GetKeys();
@@ -909,11 +935,12 @@
                                         KeyAccumulator* keys) {
     DCHECK_NE(DICTIONARY_ELEMENTS, kind());
     // Non-dictionary elements can't have all-can-read accessors.
-    uint32_t length = GetIterationLength(*object, *backing_store);
+    uint32_t length = Subclass::GetMaxIndex(*object, *backing_store);
     PropertyFilter filter = keys->filter();
+    Factory* factory = keys->isolate()->factory();
     for (uint32_t i = 0; i < length; i++) {
       if (Subclass::HasElementImpl(object, i, backing_store, filter)) {
-        keys->AddKey(i);
+        keys->AddKey(factory->NewNumberFromUint(i));
       }
     }
   }
@@ -923,10 +950,10 @@
       Handle<FixedArrayBase> backing_store, GetKeysConversion convert,
       PropertyFilter filter, Handle<FixedArray> list, uint32_t* nof_indices,
       uint32_t insertion_index = 0) {
-    uint32_t length = Subclass::GetIterationLength(*object, *backing_store);
+    uint32_t length = Subclass::GetMaxIndex(*object, *backing_store);
     for (uint32_t i = 0; i < length; i++) {
       if (Subclass::HasElementImpl(object, i, backing_store, filter)) {
-        if (convert == CONVERT_TO_STRING) {
+        if (convert == GetKeysConversion::kConvertToString) {
           Handle<String> index_string = isolate->factory()->Uint32ToString(i);
           list->set(insertion_index, *index_string);
         } else {
@@ -968,22 +995,11 @@
 
     // Sort the indices list if necessary.
     if (IsDictionaryElementsKind(kind()) || IsSloppyArgumentsElements(kind())) {
-      struct {
-        bool operator()(Object* a, Object* b) {
-          if (!a->IsUndefined()) {
-            if (b->IsUndefined()) return true;
-            return a->Number() < b->Number();
-          }
-          return !b->IsUndefined();
-        }
-      } cmp;
-      Object** start =
-          reinterpret_cast<Object**>(combined_keys->GetFirstElementAddress());
-      std::sort(start, start + nof_indices, cmp);
+      SortIndices(combined_keys, nof_indices, SKIP_WRITE_BARRIER);
       uint32_t array_length = 0;
       // Indices from dictionary elements should only be converted after
       // sorting.
-      if (convert == CONVERT_TO_STRING) {
+      if (convert == GetKeysConversion::kConvertToString) {
         for (uint32_t i = 0; i < nof_indices; i++) {
           Handle<Object> index_string = isolate->factory()->Uint32ToString(
                   combined_keys->get(i)->Number());
@@ -1044,7 +1060,7 @@
                  ? index
                  : kMaxUInt32;
     } else {
-      uint32_t length = GetIterationLength(holder, backing_store);
+      uint32_t length = Subclass::GetMaxIndex(holder, backing_store);
       return index < length ? index : kMaxUInt32;
     }
   }
@@ -1081,17 +1097,15 @@
       : ElementsAccessorBase<DictionaryElementsAccessor,
                              ElementsKindTraits<DICTIONARY_ELEMENTS> >(name) {}
 
-  static uint32_t GetIterationLength(JSObject* receiver,
-                                     FixedArrayBase* elements) {
-    uint32_t length;
-    if (receiver->IsJSArray()) {
-      // Special-case GetIterationLength for dictionary elements since the
-      // length of the array might be a HeapNumber.
-      JSArray::cast(receiver)->length()->ToArrayLength(&length);
-    } else {
-      length = GetCapacityImpl(receiver, elements);
-    }
-    return length;
+  static uint32_t GetMaxIndex(JSObject* receiver, FixedArrayBase* elements) {
+    // We cannot properly estimate this for dictionaries.
+    UNREACHABLE();
+  }
+
+  static uint32_t GetMaxNumberOfEntries(JSObject* receiver,
+                                        FixedArrayBase* backing_store) {
+    SeededNumberDictionary* dict = SeededNumberDictionary::cast(backing_store);
+    return dict->NumberOfElements();
   }
 
   static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
@@ -1162,7 +1176,7 @@
     uint32_t index = GetIndexForEntryImpl(*dict, entry);
     Handle<Object> result = SeededNumberDictionary::DeleteProperty(dict, entry);
     USE(result);
-    DCHECK(result->IsTrue());
+    DCHECK(result->IsTrue(dict->GetIsolate()));
     Handle<FixedArray> new_elements =
         SeededNumberDictionary::Shrink(dict, index);
     obj->set_elements(*new_elements);
@@ -1174,12 +1188,10 @@
     SeededNumberDictionary* dict = SeededNumberDictionary::cast(backing_store);
     if (!dict->requires_slow_elements()) return false;
     int capacity = dict->Capacity();
-    Heap* heap = holder->GetHeap();
-    Object* undefined = heap->undefined_value();
-    Object* the_hole = heap->the_hole_value();
+    Isolate* isolate = dict->GetIsolate();
     for (int i = 0; i < capacity; i++) {
       Object* key = dict->KeyAt(i);
-      if (key == the_hole || key == undefined) continue;
+      if (!dict->IsKey(isolate, key)) continue;
       DCHECK(!dict->IsDeleted(i));
       PropertyDetails details = dict->DetailsAt(i);
       if (details.type() == ACCESSOR_CONSTANT) return true;
@@ -1244,7 +1256,7 @@
     DisallowHeapAllocation no_gc;
     SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
     Object* index = dict->KeyAt(entry);
-    return !index->IsTheHole();
+    return !index->IsTheHole(dict->GetIsolate());
   }
 
   static uint32_t GetIndexForEntryImpl(FixedArrayBase* store, uint32_t entry) {
@@ -1289,23 +1301,12 @@
     return static_cast<uint32_t>(raw_key->Number());
   }
 
-  static uint32_t GetKeyForEntryImpl(Handle<SeededNumberDictionary> dictionary,
+  static uint32_t GetKeyForEntryImpl(Isolate* isolate,
+                                     Handle<SeededNumberDictionary> dictionary,
                                      int entry, PropertyFilter filter) {
     DisallowHeapAllocation no_gc;
     Object* raw_key = dictionary->KeyAt(entry);
-    if (!dictionary->IsKey(raw_key)) return kMaxUInt32;
-    return FilterKey(dictionary, entry, raw_key, filter);
-  }
-
-  static uint32_t GetKeyForEntryImpl(Handle<SeededNumberDictionary> dictionary,
-                                     int entry, PropertyFilter filter,
-                                     Object* undefined, Object* the_hole) {
-    DisallowHeapAllocation no_gc;
-    Object* raw_key = dictionary->KeyAt(entry);
-    // Replace the IsKey check with a direct comparison which is much faster.
-    if (raw_key == undefined || raw_key == the_hole) {
-      return kMaxUInt32;
-    }
+    if (!dictionary->IsKey(isolate, raw_key)) return kMaxUInt32;
     return FilterKey(dictionary, entry, raw_key, filter);
   }
 
@@ -1314,20 +1315,24 @@
                                         KeyAccumulator* keys) {
     if (keys->filter() & SKIP_STRINGS) return;
     Isolate* isolate = keys->isolate();
-    Handle<Object> undefined = isolate->factory()->undefined_value();
-    Handle<Object> the_hole = isolate->factory()->the_hole_value();
     Handle<SeededNumberDictionary> dictionary =
         Handle<SeededNumberDictionary>::cast(backing_store);
     int capacity = dictionary->Capacity();
+    Handle<FixedArray> elements = isolate->factory()->NewFixedArray(
+        GetMaxNumberOfEntries(*object, *backing_store));
+    int insertion_index = 0;
     PropertyFilter filter = keys->filter();
     for (int i = 0; i < capacity; i++) {
-      uint32_t key =
-          GetKeyForEntryImpl(dictionary, i, filter, *undefined, *the_hole);
+      uint32_t key = GetKeyForEntryImpl(isolate, dictionary, i, filter);
       if (key == kMaxUInt32) continue;
-      keys->AddKey(key);
+      Handle<Object> key_handle = isolate->factory()->NewNumberFromUint(key);
+      elements->set(insertion_index, *key_handle);
+      insertion_index++;
     }
-
-    keys->SortCurrentElementsList();
+    SortIndices(elements, insertion_index);
+    for (int i = 0; i < insertion_index; i++) {
+      keys->AddKey(elements->get(i));
+    }
   }
 
   static Handle<FixedArray> DirectCollectElementIndicesImpl(
@@ -1338,14 +1343,11 @@
     if (filter & SKIP_STRINGS) return list;
     if (filter & ONLY_ALL_CAN_READ) return list;
 
-    Handle<Object> undefined = isolate->factory()->undefined_value();
-    Handle<Object> the_hole = isolate->factory()->the_hole_value();
     Handle<SeededNumberDictionary> dictionary =
         Handle<SeededNumberDictionary>::cast(backing_store);
     uint32_t capacity = dictionary->Capacity();
     for (uint32_t i = 0; i < capacity; i++) {
-      uint32_t key =
-          GetKeyForEntryImpl(dictionary, i, filter, *undefined, *the_hole);
+      uint32_t key = GetKeyForEntryImpl(isolate, dictionary, i, filter);
       if (key == kMaxUInt32) continue;
       Handle<Object> index = isolate->factory()->NewNumberFromUint(key);
       list->set(insertion_index, *index);
@@ -1370,7 +1372,7 @@
       if (k == *the_hole) continue;
       if (dictionary->IsDeleted(i)) continue;
       Object* value = dictionary->ValueAt(i);
-      DCHECK(!value->IsTheHole());
+      DCHECK(!value->IsTheHole(isolate));
       DCHECK(!value->IsAccessorPair());
       DCHECK(!value->IsAccessorInfo());
       accumulator->AddKey(value, convert);
@@ -1552,8 +1554,8 @@
                                               KeyAccumulator* accumulator,
                                               AddKeyConversion convert) {
     Handle<FixedArrayBase> elements(receiver->elements(),
-                                    receiver->GetIsolate());
-    uint32_t length = Subclass::GetIterationLength(*receiver, *elements);
+                                    accumulator->isolate());
+    uint32_t length = Subclass::GetMaxNumberOfEntries(*receiver, *elements);
     for (uint32_t i = 0; i < length; i++) {
       if (IsFastPackedElementsKind(KindTraits::Kind) ||
           HasEntryImpl(*elements, i)) {
@@ -1801,7 +1803,7 @@
     }
     Subclass::SetLengthImpl(isolate, receiver, new_length, backing_store);
 
-    if (IsHoleyElementsKind(kind) && result->IsTheHole()) {
+    if (IsHoleyElementsKind(kind) && result->IsTheHole(isolate)) {
       return isolate->factory()->undefined_value();
     }
     return result;
@@ -1853,7 +1855,7 @@
     WriteBarrierMode mode = raw_backing_store->GetWriteBarrierMode(no_gc);
     for (uint32_t i = 0; i < copy_size; i++) {
       Object* argument = (*args)[src_index + i];
-      DCHECK(!argument->IsTheHole());
+      DCHECK(!argument->IsTheHole(raw_backing_store->GetIsolate()));
       Subclass::SetImpl(raw_backing_store, dst_index + i, argument, mode);
     }
   }
@@ -2230,7 +2232,7 @@
       Object* probe = parameter_map->get(entry + 2);
       Context* context = Context::cast(parameter_map->get(0));
       int context_entry = Smi::cast(probe)->value();
-      DCHECK(!context->get(context_entry)->IsTheHole());
+      DCHECK(!context->get(context_entry)->IsTheHole(isolate));
       return handle(context->get(context_entry), isolate);
     } else {
       // Object is not mapped, defer to the arguments.
@@ -2242,7 +2244,7 @@
         AliasedArgumentsEntry* alias = AliasedArgumentsEntry::cast(*result);
         Context* context = Context::cast(parameter_map->get(0));
         int context_entry = alias->aliased_context_slot();
-        DCHECK(!context->get(context_entry)->IsTheHole());
+        DCHECK(!context->get(context_entry)->IsTheHole(isolate));
         return handle(context->get(context_entry), isolate);
       }
       return result;
@@ -2267,7 +2269,7 @@
       Object* probe = parameter_map->get(entry + 2);
       Context* context = Context::cast(parameter_map->get(0));
       int context_entry = Smi::cast(probe)->value();
-      DCHECK(!context->get(context_entry)->IsTheHole());
+      DCHECK(!context->get(context_entry)->IsTheHole(store->GetIsolate()));
       context->set(context_entry, value);
     } else {
       FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
@@ -2276,7 +2278,7 @@
         AliasedArgumentsEntry* alias = AliasedArgumentsEntry::cast(current);
         Context* context = Context::cast(parameter_map->get(0));
         int context_entry = alias->aliased_context_slot();
-        DCHECK(!context->get(context_entry)->IsTheHole());
+        DCHECK(!context->get(context_entry)->IsTheHole(store->GetIsolate()));
         context->set(context_entry, value);
       } else {
         ArgumentsAccessor::SetImpl(arguments, entry - length, value);
@@ -2315,7 +2317,8 @@
     FixedArray* parameter_map = FixedArray::cast(parameters);
     uint32_t length = parameter_map->length() - 2;
     if (entry < length) {
-      return !GetParameterMapArg(parameter_map, entry)->IsTheHole();
+      return !GetParameterMapArg(parameter_map, entry)
+                  ->IsTheHole(parameter_map->GetIsolate());
     }
 
     FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
@@ -2344,7 +2347,7 @@
                                        uint32_t index, PropertyFilter filter) {
     FixedArray* parameter_map = FixedArray::cast(parameters);
     Object* probe = GetParameterMapArg(parameter_map, index);
-    if (!probe->IsTheHole()) return index;
+    if (!probe->IsTheHole(holder->GetIsolate())) return index;
 
     FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
     uint32_t entry = ArgumentsAccessor::GetEntryForIndexImpl(holder, arguments,
@@ -2386,18 +2389,16 @@
   static void CollectElementIndicesImpl(Handle<JSObject> object,
                                         Handle<FixedArrayBase> backing_store,
                                         KeyAccumulator* keys) {
-    FixedArray* parameter_map = FixedArray::cast(*backing_store);
-    uint32_t length = parameter_map->length() - 2;
-    for (uint32_t i = 0; i < length; ++i) {
-      if (!parameter_map->get(i + 2)->IsTheHole()) {
-        keys->AddKey(i);
-      }
-    }
-
-    Handle<FixedArrayBase> store(FixedArrayBase::cast(parameter_map->get(1)));
-    ArgumentsAccessor::CollectElementIndicesImpl(object, store, keys);
-    if (Subclass::kind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
-      keys->SortCurrentElementsList();
+    Isolate* isolate = keys->isolate();
+    uint32_t nof_indices = 0;
+    Handle<FixedArray> indices = isolate->factory()->NewFixedArray(
+        GetCapacityImpl(*object, *backing_store));
+    DirectCollectElementIndicesImpl(isolate, object, backing_store,
+                                    GetKeysConversion::kKeepNumbers,
+                                    ENUMERABLE_STRINGS, indices, &nof_indices);
+    SortIndices(indices, nof_indices);
+    for (uint32_t i = 0; i < nof_indices; i++) {
+      keys->AddKey(indices->get(i));
     }
   }
 
@@ -2410,8 +2411,8 @@
     uint32_t length = parameter_map->length() - 2;
 
     for (uint32_t i = 0; i < length; ++i) {
-      if (parameter_map->get(i + 2)->IsTheHole()) continue;
-      if (convert == CONVERT_TO_STRING) {
+      if (parameter_map->get(i + 2)->IsTheHole(isolate)) continue;
+      if (convert == GetKeysConversion::kConvertToString) {
         Handle<String> index_string = isolate->factory()->Uint32ToString(i);
         list->set(insertion_index, *index_string);
       } else {
@@ -2446,7 +2447,7 @@
     uint32_t index = GetIndexForEntryImpl(*dict, entry);
     Handle<Object> result = SeededNumberDictionary::DeleteProperty(dict, entry);
     USE(result);
-    DCHECK(result->IsTrue());
+    DCHECK(result->IsTrue(dict->GetIsolate()));
     Handle<FixedArray> new_elements =
         SeededNumberDictionary::Shrink(dict, index);
     parameter_map->set(1, *new_elements);
@@ -2479,25 +2480,25 @@
                               PropertyAttributes attributes) {
     Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(store);
     uint32_t length = parameter_map->length() - 2;
+    Isolate* isolate = store->GetIsolate();
     if (entry < length) {
       Object* probe = parameter_map->get(entry + 2);
-      DCHECK(!probe->IsTheHole());
+      DCHECK(!probe->IsTheHole(isolate));
       Context* context = Context::cast(parameter_map->get(0));
       int context_entry = Smi::cast(probe)->value();
-      DCHECK(!context->get(context_entry)->IsTheHole());
+      DCHECK(!context->get(context_entry)->IsTheHole(isolate));
       context->set(context_entry, *value);
 
       // Redefining attributes of an aliased element destroys fast aliasing.
       parameter_map->set_the_hole(entry + 2);
       // For elements that are still writable we re-establish slow aliasing.
       if ((attributes & READ_ONLY) == 0) {
-        Isolate* isolate = store->GetIsolate();
         value = isolate->factory()->NewAliasedArgumentsEntry(context_entry);
       }
 
       PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
       Handle<SeededNumberDictionary> arguments(
-          SeededNumberDictionary::cast(parameter_map->get(1)));
+          SeededNumberDictionary::cast(parameter_map->get(1)), isolate);
       arguments = SeededNumberDictionary::AddNumberEntry(
           arguments, entry, value, details, object->map()->is_prototype_map());
       // If the attributes were NONE, we would have called set rather than
@@ -2507,7 +2508,7 @@
       parameter_map->set(1, *arguments);
     } else {
       Handle<FixedArrayBase> arguments(
-          FixedArrayBase::cast(parameter_map->get(1)));
+          FixedArrayBase::cast(parameter_map->get(1)), isolate);
       DictionaryElementsAccessor::ReconfigureImpl(
           object, arguments, entry - length, value, attributes);
     }
@@ -2745,8 +2746,9 @@
                                         Handle<FixedArrayBase> backing_store,
                                         KeyAccumulator* keys) {
     uint32_t length = GetString(*object)->length();
+    Factory* factory = keys->isolate()->factory();
     for (uint32_t i = 0; i < length; i++) {
-      keys->AddKey(i);
+      keys->AddKey(factory->NewNumberFromUint(i));
     }
     BackingStoreAccessor::CollectElementIndicesImpl(object, backing_store,
                                                     keys);
diff --git a/src/execution.cc b/src/execution.cc
index 37e41d5..243bb8a 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -106,7 +106,7 @@
 #endif
 
   // Update the pending exception flag and return the value.
-  bool has_exception = value->IsException();
+  bool has_exception = value->IsException(isolate);
   DCHECK(has_exception == isolate->has_pending_exception());
   if (has_exception) {
     isolate->ReportPendingMessages();
@@ -140,7 +140,8 @@
     SaveContext save(isolate);
     isolate->set_context(function->context());
     DCHECK(function->context()->global_object()->IsJSGlobalObject());
-    auto value = Builtins::InvokeApiFunction(function, receiver, argc, argv);
+    auto value =
+        Builtins::InvokeApiFunction(isolate, function, receiver, argc, argv);
     bool has_exception = value.is_null();
     DCHECK(has_exception == isolate->has_pending_exception());
     if (has_exception) {
diff --git a/src/extensions/statistics-extension.cc b/src/extensions/statistics-extension.cc
index e6649a6..387bd31 100644
--- a/src/extensions/statistics-extension.cc
+++ b/src/extensions/statistics-extension.cc
@@ -135,8 +135,7 @@
     AddNumber(args.GetIsolate(), result, numbers[i].number, numbers[i].name);
   }
 
-  AddNumber64(args.GetIsolate(), result,
-              heap->amount_of_external_allocated_memory(),
+  AddNumber64(args.GetIsolate(), result, heap->external_memory(),
               "amount_of_external_allocated_memory");
   args.GetReturnValue().Set(result);
 }
diff --git a/src/external-reference-table.cc b/src/external-reference-table.cc
index 6b8b7d8..7e5ef84 100644
--- a/src/external-reference-table.cc
+++ b/src/external-reference-table.cc
@@ -67,8 +67,32 @@
       "power_double_double_function");
   Add(ExternalReference::power_double_int_function(isolate).address(),
       "power_double_int_function");
-  Add(ExternalReference::math_log_double_function(isolate).address(),
-      "std::log");
+  Add(ExternalReference::ieee754_atan_function(isolate).address(),
+      "base::ieee754::atan");
+  Add(ExternalReference::ieee754_atan2_function(isolate).address(),
+      "base::ieee754::atan2");
+  Add(ExternalReference::ieee754_atanh_function(isolate).address(),
+      "base::ieee754::atanh");
+  Add(ExternalReference::ieee754_cbrt_function(isolate).address(),
+      "base::ieee754::cbrt");
+  Add(ExternalReference::ieee754_cos_function(isolate).address(),
+      "base::ieee754::cos");
+  Add(ExternalReference::ieee754_exp_function(isolate).address(),
+      "base::ieee754::exp");
+  Add(ExternalReference::ieee754_expm1_function(isolate).address(),
+      "base::ieee754::expm1");
+  Add(ExternalReference::ieee754_log_function(isolate).address(),
+      "base::ieee754::log");
+  Add(ExternalReference::ieee754_log1p_function(isolate).address(),
+      "base::ieee754::log1p");
+  Add(ExternalReference::ieee754_log10_function(isolate).address(),
+      "base::ieee754::log10");
+  Add(ExternalReference::ieee754_log2_function(isolate).address(),
+      "base::ieee754::log2");
+  Add(ExternalReference::ieee754_sin_function(isolate).address(),
+      "base::ieee754::sin");
+  Add(ExternalReference::ieee754_tan_function(isolate).address(),
+      "base::ieee754::tan");
   Add(ExternalReference::store_buffer_top(isolate).address(),
       "store_buffer_top");
   Add(ExternalReference::address_of_the_hole_nan().address(), "the_hole_nan");
@@ -91,7 +115,7 @@
   Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
       "Code::MarkCodeAsExecuted");
   Add(ExternalReference::is_profiling_address(isolate).address(),
-      "CpuProfiler::is_profiling");
+      "Isolate::is_profiling");
   Add(ExternalReference::scheduled_exception_address(isolate).address(),
       "Isolate::scheduled_exception");
   Add(ExternalReference::invoke_function_callback(isolate).address(),
@@ -148,22 +172,8 @@
       "f64_acos_wrapper");
   Add(ExternalReference::f64_asin_wrapper_function(isolate).address(),
       "f64_asin_wrapper");
-  Add(ExternalReference::f64_atan_wrapper_function(isolate).address(),
-      "f64_atan_wrapper");
-  Add(ExternalReference::f64_cos_wrapper_function(isolate).address(),
-      "f64_cos_wrapper");
-  Add(ExternalReference::f64_sin_wrapper_function(isolate).address(),
-      "f64_sin_wrapper");
-  Add(ExternalReference::f64_tan_wrapper_function(isolate).address(),
-      "f64_tan_wrapper");
-  Add(ExternalReference::f64_exp_wrapper_function(isolate).address(),
-      "f64_exp_wrapper");
-  Add(ExternalReference::f64_log_wrapper_function(isolate).address(),
-      "f64_log_wrapper");
   Add(ExternalReference::f64_pow_wrapper_function(isolate).address(),
       "f64_pow_wrapper");
-  Add(ExternalReference::f64_atan2_wrapper_function(isolate).address(),
-      "f64_atan2_wrapper");
   Add(ExternalReference::f64_mod_wrapper_function(isolate).address(),
       "f64_mod_wrapper");
   Add(ExternalReference::log_enter_external_function(isolate).address(),
@@ -189,8 +199,10 @@
       "Debug::after_break_target_address()");
   Add(ExternalReference::debug_is_active_address(isolate).address(),
       "Debug::is_active_address()");
-  Add(ExternalReference::debug_step_in_enabled_address(isolate).address(),
+  Add(ExternalReference::debug_last_step_action_address(isolate).address(),
       "Debug::step_in_enabled_address()");
+  Add(ExternalReference::debug_suspended_generator_address(isolate).address(),
+      "Debug::step_suspended_generator_address()");
 
 #ifndef V8_INTERPRETED_REGEXP
   Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
@@ -226,7 +238,7 @@
   };
 
   static const RefTableEntry c_builtins[] = {
-#define DEF_ENTRY_C(name, ignored) {Builtins::c_##name, "Builtins::" #name},
+#define DEF_ENTRY_C(name) {Builtins::c_##name, "Builtins::" #name},
       BUILTIN_LIST_C(DEF_ENTRY_C)
 #undef DEF_ENTRY_C
   };
@@ -238,8 +250,8 @@
   }
 
   static const RefTableEntry builtins[] = {
-#define DEF_ENTRY_C(name, ignored) {Builtins::k##name, "Builtins::" #name},
-#define DEF_ENTRY_A(name, i1, i2, i3) {Builtins::k##name, "Builtins::" #name},
+#define DEF_ENTRY_C(name) {Builtins::k##name, "Builtins::" #name},
+#define DEF_ENTRY_A(name, i1, i2) {Builtins::k##name, "Builtins::" #name},
       BUILTIN_LIST_C(DEF_ENTRY_C) BUILTIN_LIST_A(DEF_ENTRY_A)
           BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
 #undef DEF_ENTRY_C
@@ -366,6 +378,15 @@
         Deoptimizer::CALCULATE_ENTRY_ADDRESS);
     Add(address, "lazy_deopt");
   }
+
+  // Add external references provided by the embedder (a null-terminated array).
+  intptr_t* api_external_references = isolate->api_external_references();
+  if (api_external_references != nullptr) {
+    while (*api_external_references != 0) {
+      Add(reinterpret_cast<Address>(*api_external_references), "<embedder>");
+      api_external_references++;
+    }
+  }
 }
 
 }  // namespace internal
diff --git a/src/factory.cc b/src/factory.cc
index 7d2dad0..bd4656e 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -96,6 +96,7 @@
   result->set_prototype_users(WeakFixedArray::Empty());
   result->set_registry_slot(PrototypeInfo::UNREGISTERED);
   result->set_validity_cell(Smi::FromInt(0));
+  result->set_bit_field(0);
   return result;
 }
 
@@ -291,12 +292,10 @@
   return result;
 }
 
-
-MaybeHandle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
+MaybeHandle<String> Factory::NewStringFromTwoByte(const uc16* string,
+                                                  int length,
                                                   PretenureFlag pretenure) {
-  int length = string.length();
-  const uc16* start = string.start();
-  if (String::IsOneByte(start, length)) {
+  if (String::IsOneByte(string, length)) {
     if (length == 1) return LookupSingleCharacterStringFromCode(string[0]);
     Handle<SeqOneByteString> result;
     ASSIGN_RETURN_ON_EXCEPTION(
@@ -304,7 +303,7 @@
         result,
         NewRawOneByteString(length, pretenure),
         String);
-    CopyChars(result->GetChars(), start, length);
+    CopyChars(result->GetChars(), string, length);
     return result;
   } else {
     Handle<SeqTwoByteString> result;
@@ -313,11 +312,21 @@
         result,
         NewRawTwoByteString(length, pretenure),
         String);
-    CopyChars(result->GetChars(), start, length);
+    CopyChars(result->GetChars(), string, length);
     return result;
   }
 }
 
+MaybeHandle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
+                                                  PretenureFlag pretenure) {
+  return NewStringFromTwoByte(string.start(), string.length(), pretenure);
+}
+
+MaybeHandle<String> Factory::NewStringFromTwoByte(
+    const ZoneVector<uc16>* string, PretenureFlag pretenure) {
+  return NewStringFromTwoByte(string->data(), static_cast<int>(string->size()),
+                              pretenure);
+}
 
 Handle<String> Factory::NewInternalizedStringFromUtf8(Vector<const char> str,
                                                       int chars,
@@ -704,6 +713,21 @@
   return external_string;
 }
 
+Handle<ExternalOneByteString> Factory::NewNativeSourceString(
+    const ExternalOneByteString::Resource* resource) {
+  size_t length = resource->length();
+  DCHECK_LE(length, static_cast<size_t>(String::kMaxLength));
+
+  Handle<Map> map = native_source_string_map();
+  Handle<ExternalOneByteString> external_string =
+      New<ExternalOneByteString>(map, OLD_SPACE);
+  external_string->set_length(static_cast<int>(length));
+  external_string->set_hash_field(String::kEmptyHashField);
+  external_string->set_resource(resource);
+
+  return external_string;
+}
+
 
 Handle<Symbol> Factory::NewSymbol() {
   CALL_HEAP_FUNCTION(
@@ -1207,7 +1231,7 @@
   function->set_code(info->code());
   function->set_context(*context);
   function->set_prototype_or_initial_map(*the_hole_value());
-  function->set_literals(LiteralsArray::cast(*empty_fixed_array()));
+  function->set_literals(LiteralsArray::cast(*empty_literals_array()));
   function->set_next_function_link(*undefined_value(), SKIP_WRITE_BARRIER);
   isolate()->heap()->InitializeJSObjectBody(*function, *map, JSFunction::kSize);
   return function;
@@ -1221,7 +1245,7 @@
   Handle<SharedFunctionInfo> info =
       NewSharedFunctionInfo(name, code, map->is_constructor());
   DCHECK(is_sloppy(info->language_mode()));
-  DCHECK(!map->IsUndefined());
+  DCHECK(!map->IsUndefined(isolate()));
   DCHECK(
       map.is_identical_to(isolate()->sloppy_function_map()) ||
       map.is_identical_to(isolate()->sloppy_function_without_prototype_map()) ||
@@ -1254,16 +1278,9 @@
 
 Handle<JSFunction> Factory::NewFunction(Handle<String> name, Handle<Code> code,
                                         Handle<Object> prototype,
-                                        bool read_only_prototype,
                                         bool is_strict) {
-  // In strict mode, readonly strict map is only available during bootstrap
-  DCHECK(!is_strict || !read_only_prototype ||
-         isolate()->bootstrapper()->IsActive());
-  Handle<Map> map =
-      is_strict ? isolate()->strict_function_map()
-                : read_only_prototype
-                      ? isolate()->sloppy_function_with_readonly_prototype_map()
-                      : isolate()->sloppy_function_map();
+  Handle<Map> map = is_strict ? isolate()->strict_function_map()
+                              : isolate()->sloppy_function_map();
   Handle<JSFunction> result = NewFunction(map, name, code);
   result->set_prototype_or_initial_map(*prototype);
   return result;
@@ -1273,22 +1290,19 @@
 Handle<JSFunction> Factory::NewFunction(Handle<String> name, Handle<Code> code,
                                         Handle<Object> prototype,
                                         InstanceType type, int instance_size,
-                                        bool read_only_prototype,
-                                        bool install_constructor,
                                         bool is_strict) {
   // Allocate the function
-  Handle<JSFunction> function =
-      NewFunction(name, code, prototype, read_only_prototype, is_strict);
+  Handle<JSFunction> function = NewFunction(name, code, prototype, is_strict);
 
   ElementsKind elements_kind =
       type == JS_ARRAY_TYPE ? FAST_SMI_ELEMENTS : FAST_HOLEY_SMI_ELEMENTS;
   Handle<Map> initial_map = NewMap(type, instance_size, elements_kind);
-  if (!function->shared()->is_generator()) {
-    if (prototype->IsTheHole()) {
+  // TODO(littledan): Why do we have this is_generator test when
+  // NewFunctionPrototype already handles finding an appropriately
+  // shared prototype?
+  if (!function->shared()->is_resumable()) {
+    if (prototype->IsTheHole(isolate())) {
       prototype = NewFunctionPrototype(function);
-    } else if (install_constructor) {
-      JSObject::AddProperty(Handle<JSObject>::cast(prototype),
-                            constructor_string(), function, DONT_ENUM);
     }
   }
 
@@ -1312,11 +1326,12 @@
   // can be from a different context.
   Handle<Context> native_context(function->context()->native_context());
   Handle<Map> new_map;
-  if (function->shared()->is_generator()) {
-    // Generator prototypes can share maps since they don't have "constructor"
-    // properties.
+  if (function->shared()->is_resumable()) {
+    // Generator and async function prototypes can share maps since they
+    // don't have "constructor" properties.
     new_map = handle(native_context->generator_object_prototype_map());
   } else {
+    CHECK(!function->shared()->is_async());
     // Each function prototype gets a fresh map to avoid unwanted sharing of
     // maps between prototypes of different constructors.
     Handle<JSFunction> object_function(native_context->object_function());
@@ -1327,7 +1342,7 @@
   DCHECK(!new_map->is_prototype_map());
   Handle<JSObject> prototype = NewJSObjectFromMap(new_map);
 
-  if (!function->shared()->is_generator()) {
+  if (!function->shared()->is_resumable()) {
     JSObject::AddProperty(prototype, constructor_string(), function, DONT_ENUM);
   }
 
@@ -1398,13 +1413,21 @@
                               bool is_debug) {
   Handle<ByteArray> reloc_info = NewByteArray(desc.reloc_size, TENURED);
 
+  bool has_unwinding_info = desc.unwinding_info != nullptr;
+  DCHECK((has_unwinding_info && desc.unwinding_info_size > 0) ||
+         (!has_unwinding_info && desc.unwinding_info_size == 0));
+
   // Compute size.
-  int body_size = RoundUp(desc.instr_size, kObjectAlignment);
-  int obj_size = Code::SizeFor(body_size);
+  int body_size = desc.instr_size;
+  int unwinding_info_size_field_size = kInt64Size;
+  if (has_unwinding_info) {
+    body_size = RoundUp(body_size, kInt64Size) + desc.unwinding_info_size +
+                unwinding_info_size_field_size;
+  }
+  int obj_size = Code::SizeFor(RoundUp(body_size, kObjectAlignment));
 
   Handle<Code> code = NewCodeRaw(obj_size, immovable);
-  DCHECK(isolate()->heap()->memory_allocator()->code_range() == NULL ||
-         !isolate()->heap()->memory_allocator()->code_range()->valid() ||
+  DCHECK(!isolate()->heap()->memory_allocator()->code_range()->valid() ||
          isolate()->heap()->memory_allocator()->code_range()->contains(
              code->address()) ||
          obj_size <= isolate()->heap()->code_space()->AreaSize());
@@ -1417,6 +1440,7 @@
   code->set_instruction_size(desc.instr_size);
   code->set_relocation_info(*reloc_info);
   code->set_flags(flags);
+  code->set_has_unwinding_info(has_unwinding_info);
   code->set_raw_kind_specific_flags1(0);
   code->set_raw_kind_specific_flags2(0);
   code->set_is_crankshafted(crankshafted);
@@ -1461,12 +1485,6 @@
 }
 
 
-Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
-  CALL_HEAP_FUNCTION(isolate(),
-                     isolate()->heap()->CopyCode(*code, reloc_info),
-                     Code);
-}
-
 Handle<BytecodeArray> Factory::CopyBytecodeArray(
     Handle<BytecodeArray> bytecode_array) {
   CALL_HEAP_FUNCTION(isolate(),
@@ -1994,7 +2012,7 @@
   } else {
     map = Handle<Map>(isolate()->proxy_map());
   }
-  DCHECK(map->prototype()->IsNull());
+  DCHECK(map->prototype()->IsNull(isolate()));
   Handle<JSProxy> result = New<JSProxy>(map, NEW_SPACE);
   result->initialize_properties();
   result->set_target(*target);
@@ -2066,12 +2084,6 @@
   shared->set_num_literals(number_of_literals);
   if (IsGeneratorFunction(kind)) {
     shared->set_instance_class_name(isolate()->heap()->Generator_string());
-    shared->DisableOptimization(kGenerator);
-  }
-  if (IsAsyncFunction(kind)) {
-    // TODO(caitp): Enable optimization of async functions when they are enabled
-    // for generators functions.
-    shared->DisableOptimization(kGenerator);
   }
   return shared;
 }
@@ -2126,9 +2138,7 @@
   StaticFeedbackVectorSpec empty_spec;
   Handle<TypeFeedbackMetadata> feedback_metadata =
       TypeFeedbackMetadata::New(isolate(), &empty_spec);
-  Handle<TypeFeedbackVector> feedback_vector =
-      TypeFeedbackVector::New(isolate(), feedback_metadata);
-  share->set_feedback_vector(*feedback_vector, SKIP_WRITE_BARRIER);
+  share->set_feedback_metadata(*feedback_metadata, SKIP_WRITE_BARRIER);
 #if TRACE_MAPS
   share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
 #endif
@@ -2204,7 +2214,7 @@
   isolate()->counters()->number_to_string_runtime()->Increment();
   if (check_number_string_cache) {
     Handle<Object> cached = GetNumberStringCache(number);
-    if (!cached->IsUndefined()) return Handle<String>::cast(cached);
+    if (!cached->IsUndefined(isolate())) return Handle<String>::cast(cached);
   }
 
   char arr[100];
@@ -2310,7 +2320,7 @@
 
   int cache_index = number_of_properties - 1;
   Handle<Object> maybe_cache(context->map_cache(), isolate());
-  if (maybe_cache->IsUndefined()) {
+  if (maybe_cache->IsUndefined(isolate())) {
     // Allocate the new map cache for the native context.
     maybe_cache = NewFixedArray(kMapCacheSize, TENURED);
     context->set_map_cache(*maybe_cache);
@@ -2366,6 +2376,7 @@
   store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(0));
   store->set(JSRegExp::kIrregexpCaptureCountIndex,
              Smi::FromInt(capture_count));
+  store->set(JSRegExp::kIrregexpCaptureNameMapIndex, uninitialized);
   regexp->set_data(*store);
 }
 
diff --git a/src/factory.h b/src/factory.h
index 51ba09d..f8b91a9 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -163,6 +163,9 @@
       Vector<const uc16> str,
       PretenureFlag pretenure = NOT_TENURED);
 
+  MUST_USE_RESULT MaybeHandle<String> NewStringFromTwoByte(
+      const ZoneVector<uc16>* str, PretenureFlag pretenure = NOT_TENURED);
+
   // Allocates an internalized string in old space based on the character
   // stream.
   Handle<String> NewInternalizedStringFromUtf8(Vector<const char> str,
@@ -224,6 +227,10 @@
       const ExternalOneByteString::Resource* resource);
   MUST_USE_RESULT MaybeHandle<String> NewExternalStringFromTwoByte(
       const ExternalTwoByteString::Resource* resource);
+  // Create a new external string object for one-byte encoded native script.
+  // It does not cache the resource data pointer.
+  Handle<ExternalOneByteString> NewNativeSourceString(
+      const ExternalOneByteString::Resource* resource);
 
   // Create a symbol.
   Handle<Symbol> NewSymbol();
@@ -501,7 +508,6 @@
 
   Handle<JSFunction> NewFunction(Handle<String> name, Handle<Code> code,
                                  Handle<Object> prototype,
-                                 bool read_only_prototype = false,
                                  bool is_strict = false);
   Handle<JSFunction> NewFunction(Handle<String> name);
   Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name,
@@ -519,8 +525,6 @@
   Handle<JSFunction> NewFunction(Handle<String> name, Handle<Code> code,
                                  Handle<Object> prototype, InstanceType type,
                                  int instance_size,
-                                 bool read_only_prototype = false,
-                                 bool install_constructor = false,
                                  bool is_strict = false);
   Handle<JSFunction> NewFunction(Handle<String> name,
                                  Handle<Code> code,
@@ -696,6 +700,9 @@
                 AllocationSpace space,
                 Handle<AllocationSite> allocation_site);
 
+  MaybeHandle<String> NewStringFromTwoByte(const uc16* string, int length,
+                                           PretenureFlag pretenure);
+
   // Creates a code object that is not yet fully initialized yet.
   inline Handle<Code> NewCodeRaw(int object_size, bool immovable);
 
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 841d326..e3dcf29 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -188,7 +188,7 @@
 // Removing extra Promise functions is shipped
 DEFINE_NEG_VALUE_IMPLICATION(harmony_shipping, promise_extra, true)
 
-DEFINE_BOOL(intl_extra, true, "additional V8 Intl functions")
+DEFINE_BOOL(intl_extra, false, "additional V8 Intl functions")
 // Removing extra Intl functions is shipped
 DEFINE_NEG_VALUE_IMPLICATION(harmony_shipping, intl_extra, true)
 
@@ -197,49 +197,41 @@
 DEFINE_IMPLICATION(es_staging, move_object_start)
 
 // Features that are still work in progress (behind individual flags).
-#ifdef V8_I18N_SUPPORT
 #define HARMONY_INPROGRESS(V)                                           \
   V(harmony_array_prototype_values, "harmony Array.prototype.values")   \
   V(harmony_function_sent, "harmony function.sent")                     \
   V(harmony_sharedarraybuffer, "harmony sharedarraybuffer")             \
   V(harmony_simd, "harmony simd")                                       \
+  V(harmony_explicit_tailcalls, "harmony explicit tail calls")          \
   V(harmony_do_expressions, "harmony do-expressions")                   \
+  V(harmony_restrictive_generators,                                     \
+    "harmony restrictions on generator declarations")                   \
+  V(harmony_regexp_named_captures, "harmony regexp named captures")     \
   V(harmony_regexp_property, "harmony unicode regexp property classes") \
-  V(icu_case_mapping, "case mapping with ICU rather than Unibrow")      \
-  V(harmony_async_await, "harmony async-await")
-#else
-#define HARMONY_INPROGRESS(V)                                           \
-  V(harmony_array_prototype_values, "harmony Array.prototype.values")   \
-  V(harmony_function_sent, "harmony function.sent")                     \
-  V(harmony_sharedarraybuffer, "harmony sharedarraybuffer")             \
-  V(harmony_simd, "harmony simd")                                       \
-  V(harmony_do_expressions, "harmony do-expressions")                   \
-  V(harmony_regexp_property, "harmony unicode regexp property classes") \
-  V(harmony_async_await, "harmony async-await")
-#endif
+  V(harmony_for_in, "harmony for-in syntax")
 
 // Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED(V)                                                    \
-  V(harmony_for_in, "harmony for-in syntax")                                 \
+#define HARMONY_STAGED_BASE(V)                                               \
   V(harmony_regexp_lookbehind, "harmony regexp lookbehind")                  \
   V(harmony_tailcalls, "harmony tail calls")                                 \
-  V(harmony_explicit_tailcalls, "harmony explicit tail calls")               \
   V(harmony_object_values_entries, "harmony Object.values / Object.entries") \
   V(harmony_object_own_property_descriptors,                                 \
     "harmony Object.getOwnPropertyDescriptors()")                            \
+  V(harmony_async_await, "harmony async-await")                              \
   V(harmony_string_padding, "harmony String-padding methods")
 
+#ifdef V8_I18N_SUPPORT
+#define HARMONY_STAGED(V) \
+  HARMONY_STAGED_BASE(V)  \
+  V(icu_case_mapping, "case mapping with ICU rather than Unibrow")
+#else
+#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
+#endif
+
 // Features that are shipping (turned on by default, but internal flag remains).
 #define HARMONY_SHIPPING(V)                                           \
-  V(harmony_function_name, "harmony Function name inference")         \
-  V(harmony_instanceof, "harmony instanceof support")                 \
-  V(harmony_iterator_close, "harmony iterator finalization")          \
-  V(harmony_unicode_regexps, "harmony unicode regexps")               \
-  V(harmony_regexp_exec, "harmony RegExp exec override behavior")     \
-  V(harmony_regexp_subclass, "harmony regexp subclassing")            \
   V(harmony_restrictive_declarations,                                 \
     "harmony limitations on sloppy mode function declarations")       \
-  V(harmony_species, "harmony Symbol.species")                        \
   V(harmony_exponentiation_operator, "harmony exponentiation operator `**`")
 
 // Once a shipping feature has proved stable in the wild, it will be dropped
@@ -302,11 +294,16 @@
 
 // Flags for Ignition.
 DEFINE_BOOL(ignition, false, "use ignition interpreter")
-DEFINE_BOOL(ignition_eager, true, "eagerly compile and parse with ignition")
-DEFINE_BOOL(ignition_generators, false,
+DEFINE_BOOL(ignition_eager, false, "eagerly compile and parse with ignition")
+DEFINE_BOOL(ignition_generators, true,
             "enable experimental ignition support for generators")
 DEFINE_STRING(ignition_filter, "*", "filter for ignition interpreter")
+DEFINE_BOOL(ignition_deadcode, true,
+            "use ignition dead code elimination optimizer")
 DEFINE_BOOL(ignition_peephole, true, "use ignition peephole optimizer")
+DEFINE_BOOL(ignition_reo, true, "use ignition register equivalence optimizer")
+DEFINE_BOOL(ignition_filter_expression_positions, true,
+            "filter expression positions before the bytecode pipeline")
 DEFINE_BOOL(print_bytecode, false,
             "print bytecode generated by ignition interpreter")
 DEFINE_BOOL(trace_ignition, false,
@@ -424,9 +421,9 @@
 // Flags for TurboFan.
 DEFINE_BOOL(turbo, false, "enable TurboFan compiler")
 DEFINE_IMPLICATION(turbo, turbo_asm_deoptimization)
+DEFINE_IMPLICATION(turbo, turbo_type_feedback)
 DEFINE_BOOL(turbo_shipping, true, "enable TurboFan compiler on subset")
 DEFINE_BOOL(turbo_from_bytecode, false, "enable building graphs from bytecode")
-DEFINE_BOOL(turbo_greedy_regalloc, false, "use the greedy register allocator")
 DEFINE_BOOL(turbo_sp_frame_access, false,
             "use stack pointer-relative access to frame wherever possible")
 DEFINE_BOOL(turbo_preprocess_ranges, true,
@@ -448,7 +445,11 @@
             "enable deoptimization in TurboFan for asm.js code")
 DEFINE_BOOL(turbo_verify, DEBUG_BOOL, "verify TurboFan graphs at each phase")
 DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
+DEFINE_BOOL(turbo_stats_nvp, false,
+            "print TurboFan statistics in machine-readable format")
 DEFINE_BOOL(turbo_splitting, true, "split nodes during scheduling in TurboFan")
+DEFINE_BOOL(turbo_type_feedback, false,
+            "use typed feedback for representation inference in Turbofan")
 DEFINE_BOOL(turbo_source_positions, false,
             "track source code positions when building TurboFan IR")
 DEFINE_IMPLICATION(trace_turbo, turbo_source_positions)
@@ -475,15 +476,18 @@
             "enable instruction scheduling in TurboFan")
 DEFINE_BOOL(turbo_stress_instruction_scheduling, false,
             "randomly schedule instructions to stress dependency tracking")
+DEFINE_BOOL(turbo_store_elimination, false,
+            "enable store-store elimination in TurboFan")
 
 // Flags for native WebAssembly.
 DEFINE_BOOL(expose_wasm, false, "expose WASM interface to JavaScript")
-DEFINE_INT(wasm_num_compilation_tasks, 0,
+DEFINE_INT(wasm_num_compilation_tasks, 10,
            "number of parallel compilation tasks for wasm")
 DEFINE_BOOL(trace_wasm_encoder, false, "trace encoding of wasm code")
 DEFINE_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
 DEFINE_BOOL(trace_wasm_decode_time, false, "trace decoding time of wasm code")
 DEFINE_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
+DEFINE_BOOL(trace_wasm_interpreter, false, "trace interpretation of wasm code")
 DEFINE_INT(trace_wasm_ast_start, 0,
            "start function for WASM AST trace (inclusive)")
 DEFINE_INT(trace_wasm_ast_end, 0, "end function for WASM AST trace (exclusive)")
@@ -504,6 +508,9 @@
 DEFINE_INT(typed_array_max_size_in_heap, 64,
            "threshold for in-heap typed array")
 
+DEFINE_BOOL(wasm_jit_prototype, false,
+            "enable experimental wasm runtime dynamic code generation")
+
 // Profiler flags.
 DEFINE_INT(frame_count, 1, "number of stack frames inspected by the profiler")
 // 0x1800 fits in the immediate field of an ARM instruction.
@@ -543,8 +550,6 @@
             "enable use of NEON instructions if available (ARM only)")
 DEFINE_BOOL(enable_sudiv, true,
             "enable use of SDIV and UDIV instructions if available (ARM only)")
-DEFINE_BOOL(enable_mls, true,
-            "enable use of MLS instructions if available (ARM only)")
 DEFINE_BOOL(enable_movw_movt, false,
             "enable loading 32-bit constant by means of movw/movt "
             "instruction pairs (ARM only)")
@@ -562,7 +567,6 @@
 DEFINE_IMPLICATION(enable_armv8, enable_neon)
 DEFINE_IMPLICATION(enable_armv8, enable_32dregs)
 DEFINE_IMPLICATION(enable_armv8, enable_sudiv)
-DEFINE_IMPLICATION(enable_armv8, enable_mls)
 
 // bootstrapper.cc
 DEFINE_STRING(expose_natives_as, NULL, "expose natives in global object")
@@ -713,7 +717,7 @@
            "least this many unmarked objects")
 DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
            "at most try this many times to finalize incremental marking")
-DEFINE_BOOL(black_allocation, true, "use black allocation")
+DEFINE_BOOL(black_allocation, false, "use black allocation")
 DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
 DEFINE_BOOL(parallel_compaction, true, "use parallel compaction")
 DEFINE_BOOL(parallel_pointer_update, true,
@@ -764,6 +768,7 @@
 // ic.cc
 DEFINE_BOOL(use_ic, true, "use inline caching")
 DEFINE_BOOL(trace_ic, false, "trace inline cache state transitions")
+DEFINE_BOOL(tf_load_ic_stub, true, "use TF LoadIC stub")
 
 // macro-assembler-ia32.cc
 DEFINE_BOOL(native_code_counters, false,
@@ -788,13 +793,8 @@
 
 // objects.cc
 DEFINE_BOOL(trace_weak_arrays, false, "Trace WeakFixedArray usage")
-DEFINE_BOOL(track_prototype_users, false,
-            "Keep track of which maps refer to a given prototype object")
 DEFINE_BOOL(trace_prototype_users, false,
             "Trace updates to prototype user tracking")
-DEFINE_BOOL(eliminate_prototype_chain_checks, true,
-            "Collapse prototype chain checks into single-cell checks")
-DEFINE_IMPLICATION(eliminate_prototype_chain_checks, track_prototype_users)
 DEFINE_BOOL(use_verbose_printer, true, "allows verbose printing")
 DEFINE_BOOL(trace_for_in_enumerate, false, "Trace for-in enumerate slow-paths")
 #if TRACE_MAPS
@@ -843,6 +843,7 @@
 DEFINE_INT(hash_seed, 0,
            "Fixed seed to use to hash property keys (0 means random)"
            "(with snapshots this option cannot override the baked-in seed)")
+DEFINE_BOOL(trace_rail, false, "trace RAIL mode")
 
 // runtime.cc
 DEFINE_BOOL(runtime_call_stats, false, "report runtime call counts and times")
@@ -1052,6 +1053,8 @@
 DEFINE_NEG_IMPLICATION(perf_prof, compact_code_space)
 DEFINE_BOOL(perf_prof_debug_info, false,
             "Enable debug info for perf linux profiler (experimental).")
+DEFINE_BOOL(perf_prof_unwinding_info, false,
+            "Enable unwinding info for perf linux profiler (experimental).")
 DEFINE_STRING(gc_fake_mmap, "/tmp/__v8_gc__",
               "Specify the name of the file for fake gc mmap used in ll_prof")
 DEFINE_BOOL(log_internal_timer_events, false, "Time internal events.")
diff --git a/src/flags.cc b/src/flags.cc
index f67defd..f7ae004 100644
--- a/src/flags.cc
+++ b/src/flags.cc
@@ -429,6 +429,10 @@
         PrintF(stderr, "Error: illegal value for flag %s of type %s\n"
                "Try --help for options\n",
                arg, Type2String(flag->type()));
+        if (is_bool_type) {
+          PrintF(stderr,
+                 "To set or unset a boolean flag, use --flag or --no-flag.\n");
+        }
         return_code = j;
         break;
       }
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 998be23..c45f015 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -233,6 +233,9 @@
     StackFrameIteratorBase* iterator) : JavaScriptFrame(iterator) {
 }
 
+inline BuiltinFrame::BuiltinFrame(StackFrameIteratorBase* iterator)
+    : JavaScriptFrame(iterator) {}
+
 inline WasmFrame::WasmFrame(StackFrameIteratorBase* iterator)
     : StandardFrame(iterator) {}
 
diff --git a/src/frames.cc b/src/frames.cc
index a8fe6bb..698b935 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -16,7 +16,6 @@
 #include "src/safepoint-table.h"
 #include "src/string-stream.h"
 #include "src/vm-state-inl.h"
-#include "src/wasm/wasm-module.h"
 
 namespace v8 {
 namespace internal {
@@ -400,11 +399,15 @@
       isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
   Code* interpreter_bytecode_dispatch =
       isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
+  Code* interpreter_baseline_on_return =
+      isolate->builtins()->builtin(Builtins::kInterpreterMarkBaselineOnReturn);
 
   return (pc >= interpreter_entry_trampoline->instruction_start() &&
           pc < interpreter_entry_trampoline->instruction_end()) ||
          (pc >= interpreter_bytecode_dispatch->instruction_start() &&
-          pc < interpreter_bytecode_dispatch->instruction_end());
+          pc < interpreter_bytecode_dispatch->instruction_end()) ||
+         (pc >= interpreter_baseline_on_return->instruction_start() &&
+          pc < interpreter_baseline_on_return->instruction_end());
 }
 
 StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
@@ -445,17 +448,20 @@
     Code* code_obj =
         GetContainingCode(iterator->isolate(), *(state->pc_address));
     if (code_obj != nullptr) {
-      if (code_obj->is_interpreter_entry_trampoline() ||
-          code_obj->is_interpreter_enter_bytecode_dispatch()) {
-        return INTERPRETED;
-      }
       switch (code_obj->kind()) {
         case Code::BUILTIN:
           if (marker->IsSmi()) break;
-          // We treat frames for BUILTIN Code objects as OptimizedFrame for now
-          // (all the builtins with JavaScript linkage are actually generated
-          // with TurboFan currently, so this is sound).
-          return OPTIMIZED;
+          if (code_obj->is_interpreter_trampoline_builtin()) {
+            return INTERPRETED;
+          }
+          if (code_obj->is_turbofanned()) {
+            // TODO(bmeurer): We treat frames for BUILTIN Code objects as
+            // OptimizedFrame for now (all the builtins with JavaScript
+            // linkage are actually generated with TurboFan currently, so
+            // this is sound).
+            return OPTIMIZED;
+          }
+          return BUILTIN;
         case Code::FUNCTION:
           return JAVA_SCRIPT;
         case Code::OPTIMIZED_FUNCTION:
@@ -600,12 +606,13 @@
   return EXIT;
 }
 
-
 Address ExitFrame::ComputeStackPointer(Address fp) {
+#if defined(USE_SIMULATOR)
+  MSAN_MEMORY_IS_INITIALIZED(fp + ExitFrameConstants::kSPOffset, kPointerSize);
+#endif
   return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
 }
 
-
 void ExitFrame::FillState(Address fp, Address sp, State* state) {
   state->sp = sp;
   state->fp = fp;
@@ -689,6 +696,7 @@
       case JAVA_SCRIPT:
       case OPTIMIZED:
       case INTERPRETED:
+      case BUILTIN:
         // These frame types have a context, but they are actually stored
         // in the place on the stack that one finds the frame type.
         UNREACHABLE();
@@ -720,10 +728,9 @@
   if (safepoint_entry.has_doubles()) {
     // Number of doubles not known at snapshot time.
     DCHECK(!isolate()->serializer_enabled());
-    parameters_base +=
-        RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
-            ->num_allocatable_double_registers() *
-        kDoubleSize / kPointerSize;
+    parameters_base += RegisterConfiguration::Crankshaft()
+                           ->num_allocatable_double_registers() *
+                       kDoubleSize / kPointerSize;
   }
 
   // Visit the registers that contain pointers if any.
@@ -1286,11 +1293,6 @@
   return Smi::cast(GetExpression(0))->value();
 }
 
-
-Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
-  return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
 int ArgumentsAdaptorFrame::GetLength(Address fp) {
   const int offset = ArgumentsAdaptorFrameConstants::kLengthOffset;
   return Smi::cast(Memory::Object_at(fp + offset))->value();
@@ -1301,6 +1303,15 @@
       Builtins::kArgumentsAdaptorTrampoline);
 }
 
+void BuiltinFrame::Print(StringStream* accumulator, PrintMode mode,
+                         int index) const {
+  // TODO(bmeurer)
+}
+
+int BuiltinFrame::GetNumberOfIncomingArguments() const {
+  return Smi::cast(GetExpression(0))->value();
+}
+
 Address InternalFrame::GetCallerStackPointer() const {
   // Internal frames have no arguments. The stack pointer of the
   // caller is at a fixed offset from the frame pointer.
@@ -1346,7 +1357,7 @@
   FixedArray* deopt_data = LookupCode()->deoptimization_data();
   DCHECK(deopt_data->length() == 2);
   Object* func_index_obj = deopt_data->get(1);
-  if (func_index_obj->IsUndefined()) return static_cast<uint32_t>(-1);
+  if (func_index_obj->IsUndefined(isolate())) return static_cast<uint32_t>(-1);
   if (func_index_obj->IsSmi()) return Smi::cast(func_index_obj)->value();
   DCHECK(func_index_obj->IsHeapNumber());
   uint32_t val = static_cast<uint32_t>(-1);
@@ -1355,13 +1366,6 @@
   return val;
 }
 
-Object* WasmFrame::function_name() {
-  Object* wasm_object = wasm_obj();
-  if (wasm_object->IsUndefined()) return wasm_object;
-  Handle<JSObject> wasm = handle(JSObject::cast(wasm_object));
-  return *wasm::GetWasmFunctionName(wasm, function_index());
-}
-
 namespace {
 
 
diff --git a/src/frames.h b/src/frames.h
index 4163d6f..a1e438c 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -111,7 +111,8 @@
   V(STUB_FAILURE_TRAMPOLINE, StubFailureTrampolineFrame) \
   V(INTERNAL, InternalFrame)                             \
   V(CONSTRUCT, ConstructFrame)                           \
-  V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
+  V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)            \
+  V(BUILTIN, BuiltinFrame)
 
 // Every pointer in a frame has a slot id. On 32-bit platforms, doubles consume
 // two slots.
@@ -280,6 +281,14 @@
   DEFINE_TYPED_FRAME_SIZES(2);
 };
 
+class BuiltinFrameConstants : public TypedFrameConstants {
+ public:
+  // FP-relative.
+  static const int kFunctionOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+  static const int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+  DEFINE_TYPED_FRAME_SIZES(2);
+};
+
 class InternalFrameConstants : public TypedFrameConstants {
  public:
   // FP-relative.
@@ -325,6 +334,8 @@
 
   // FP-relative.
   static const int kLastParamFromFp = StandardFrameConstants::kCallerSPOffset;
+  static const int kCallerPCOffsetFromFp =
+      StandardFrameConstants::kCallerPCOffset;
   static const int kNewTargetFromFp =
       -StandardFrameConstants::kFixedFrameSizeFromFp - 1 * kPointerSize;
   static const int kBytecodeArrayFromFp =
@@ -409,6 +420,7 @@
   bool is_wasm_to_js() const { return type() == WASM_TO_JS; }
   bool is_js_to_wasm() const { return type() == JS_TO_WASM; }
   bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
+  bool is_builtin() const { return type() == BUILTIN; }
   bool is_internal() const { return type() == INTERNAL; }
   bool is_stub_failure_trampoline() const {
     return type() == STUB_FAILURE_TRAMPOLINE;
@@ -419,7 +431,7 @@
   bool is_java_script() const {
     Type type = this->type();
     return (type == JAVA_SCRIPT) || (type == OPTIMIZED) ||
-           (type == INTERPRETED);
+           (type == INTERPRETED) || (type == BUILTIN);
   }
 
   // Accessors.
@@ -948,7 +960,28 @@
 
   int GetNumberOfIncomingArguments() const override;
 
-  Address GetCallerStackPointer() const override;
+ private:
+  friend class StackFrameIteratorBase;
+};
+
+// Builtin frames are built for builtins with JavaScript linkage, such as
+// various standard library functions (i.e. Math.asin, Math.floor, etc.).
+class BuiltinFrame final : public JavaScriptFrame {
+ public:
+  Type type() const final { return BUILTIN; }
+
+  static BuiltinFrame* cast(StackFrame* frame) {
+    DCHECK(frame->is_builtin());
+    return static_cast<BuiltinFrame*>(frame);
+  }
+
+  // Printing support.
+  void Print(StringStream* accumulator, PrintMode mode, int index) const final;
+
+ protected:
+  inline explicit BuiltinFrame(StackFrameIteratorBase* iterator);
+
+  int GetNumberOfIncomingArguments() const final;
 
  private:
   friend class StackFrameIteratorBase;
@@ -971,8 +1004,6 @@
   Object* wasm_obj();
   uint32_t function_index();
 
-  Object* function_name();
-
   static WasmFrame* cast(StackFrame* frame) {
     DCHECK(frame->is_wasm());
     return static_cast<WasmFrame*>(frame);
diff --git a/src/full-codegen/arm/full-codegen-arm.cc b/src/full-codegen/arm/full-codegen-arm.cc
index 91253e3..73e4750 100644
--- a/src/full-codegen/arm/full-codegen-arm.cc
+++ b/src/full-codegen/arm/full-codegen-arm.cc
@@ -540,10 +540,12 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
-  if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+  DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
+         !lit->IsUndetectable());
+  if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
+      lit->IsFalse(isolate())) {
     if (false_label_ != fall_through_) __ b(false_label_);
-  } else if (lit->IsTrue() || lit->IsJSObject()) {
+  } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
     if (true_label_ != fall_through_) __ b(true_label_);
   } else if (lit->IsString()) {
     if (String::cast(*lit)->length() == 0) {
@@ -799,21 +801,11 @@
 
     case VariableLocation::LOOKUP: {
       Comment cmnt(masm_, "[ VariableDeclaration");
+      DCHECK_EQ(VAR, mode);
+      DCHECK(!hole_init);
       __ mov(r2, Operand(variable->name()));
-      // Declaration nodes are always introduced in one of four modes.
-      DCHECK(IsDeclaredVariableMode(mode));
-      // Push initial value, if any.
-      // Note: For variables we must not push an initial value (such as
-      // 'undefined') because we may have a (legal) redeclaration and we
-      // must not destroy the current value.
-      if (hole_init) {
-        __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
-      } else {
-        __ mov(r0, Operand(Smi::FromInt(0)));  // Indicates no initial value.
-      }
-      __ Push(r2, r0);
-      __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      __ CallRuntime(Runtime::kDeclareLookupSlot);
+      __ Push(r2);
+      __ CallRuntime(Runtime::kDeclareEvalVar);
       PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
@@ -870,8 +862,7 @@
       PushOperand(r2);
       // Push initial value for function declaration.
       VisitForStackValue(declaration->fun());
-      PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+      CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
       PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
@@ -1303,14 +1294,14 @@
 
 void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
                                                TypeofMode typeof_mode) {
+#ifdef DEBUG
   Variable* var = proxy->var();
   DCHECK(var->IsUnallocatedOrGlobalSlot() ||
          (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-  __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
-  __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
-  __ mov(LoadDescriptor::SlotRegister(),
+#endif
+  __ mov(LoadGlobalDescriptor::SlotRegister(),
          Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
-  CallLoadIC(typeof_mode);
+  CallLoadGlobalIC(typeof_mode);
 }
 
 
@@ -1379,18 +1370,6 @@
 }
 
 
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
-  Comment cmnt(masm_, "[ RegExpLiteral");
-  __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
-  __ mov(r1, Operand(expr->pattern()));
-  __ mov(r0, Operand(Smi::FromInt(expr->flags())));
-  FastCloneRegExpStub stub(isolate());
-  __ CallStub(&stub);
-  context()->Plug(r0);
-}
-
-
 void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
   Expression* expression = (property == NULL) ? NULL : property->value();
   if (expression == NULL) {
@@ -1500,12 +1479,16 @@
 
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->getter = property;
+          AccessorTable::Iterator it = accessor_table.lookup(key);
+          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->setter = property;
+          AccessorTable::Iterator it = accessor_table.lookup(key);
+          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->setter = property;
         }
         break;
     }
@@ -1524,6 +1507,7 @@
     __ mov(r0, Operand(Smi::FromInt(NONE)));
     PushOperand(r0);
     CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+    PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1569,6 +1553,8 @@
             PushOperand(Smi::FromInt(NONE));
             PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
             CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+            PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+                                   BailoutState::NO_REGISTERS);
           } else {
             DropOperands(3);
           }
@@ -1849,7 +1835,7 @@
   // When we arrive here, r0 holds the generator object.
   __ RecordGeneratorContinuation();
   __ ldr(r1, FieldMemOperand(r0, JSGeneratorObject::kResumeModeOffset));
-  __ ldr(r0, FieldMemOperand(r0, JSGeneratorObject::kInputOffset));
+  __ ldr(r0, FieldMemOperand(r0, JSGeneratorObject::kInputOrDebugPosOffset));
   STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
   STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
   __ cmp(r1, Operand(Smi::FromInt(JSGeneratorObject::kReturn)));
@@ -2901,73 +2887,6 @@
 }
 
 
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(3, args->length());
-
-  Register string = r0;
-  Register index = r1;
-  Register value = r2;
-
-  VisitForStackValue(args->at(0));        // index
-  VisitForStackValue(args->at(1));        // value
-  VisitForAccumulatorValue(args->at(2));  // string
-  PopOperands(index, value);
-
-  if (FLAG_debug_code) {
-    __ SmiTst(value);
-    __ Check(eq, kNonSmiValue);
-    __ SmiTst(index);
-    __ Check(eq, kNonSmiIndex);
-    __ SmiUntag(index, index);
-    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
-    __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
-    __ SmiTag(index, index);
-  }
-
-  __ SmiUntag(value, value);
-  __ add(ip,
-         string,
-         Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  __ strb(value, MemOperand(ip, index, LSR, kSmiTagSize));
-  context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(3, args->length());
-
-  Register string = r0;
-  Register index = r1;
-  Register value = r2;
-
-  VisitForStackValue(args->at(0));        // index
-  VisitForStackValue(args->at(1));        // value
-  VisitForAccumulatorValue(args->at(2));  // string
-  PopOperands(index, value);
-
-  if (FLAG_debug_code) {
-    __ SmiTst(value);
-    __ Check(eq, kNonSmiValue);
-    __ SmiTst(index);
-    __ Check(eq, kNonSmiIndex);
-    __ SmiUntag(index, index);
-    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
-    __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
-    __ SmiTag(index, index);
-  }
-
-  __ SmiUntag(value, value);
-  __ add(ip,
-         string,
-         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  __ strh(value, MemOperand(ip, index));
-  context()->Plug(string);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3001,13 +2920,8 @@
   Label need_conversion;
   Label index_out_of_range;
   Label done;
-  StringCharCodeAtGenerator generator(object,
-                                      index,
-                                      result,
-                                      &need_conversion,
-                                      &need_conversion,
-                                      &index_out_of_range,
-                                      STRING_INDEX_IS_NUMBER);
+  StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
+                                      &need_conversion, &index_out_of_range);
   generator.GenerateFast(masm_);
   __ jmp(&done);
 
@@ -3031,53 +2945,6 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-
-  Register object = r1;
-  Register index = r0;
-  Register scratch = r3;
-  Register result = r0;
-
-  PopOperand(object);
-
-  Label need_conversion;
-  Label index_out_of_range;
-  Label done;
-  StringCharAtGenerator generator(object,
-                                  index,
-                                  scratch,
-                                  result,
-                                  &need_conversion,
-                                  &need_conversion,
-                                  &index_out_of_range,
-                                  STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm_);
-  __ jmp(&done);
-
-  __ bind(&index_out_of_range);
-  // When the index is out of range, the spec requires us to return
-  // the empty string.
-  __ LoadRoot(result, Heap::kempty_stringRootIndex);
-  __ jmp(&done);
-
-  __ bind(&need_conversion);
-  // Move smi zero into the result register, which will trigger
-  // conversion.
-  __ mov(result, Operand(Smi::FromInt(0)));
-  __ jmp(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
-  __ bind(&done);
-  context()->Plug(result);
-}
-
-
 void FullCodeGenerator::EmitCall(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_LE(2, args->length());
@@ -3447,8 +3314,7 @@
   }
 
   // Convert old value into a number.
-  ToNumberStub convert_stub(isolate());
-  __ CallStub(&convert_stub);
+  __ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
   PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
 
   // Save result for postfix expressions.
diff --git a/src/full-codegen/arm64/full-codegen-arm64.cc b/src/full-codegen/arm64/full-codegen-arm64.cc
index 61cb141..7848d0d 100644
--- a/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -531,10 +531,12 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
-  if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+  DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
+         !lit->IsUndetectable());
+  if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
+      lit->IsFalse(isolate())) {
     if (false_label_ != fall_through_) __ B(false_label_);
-  } else if (lit->IsTrue() || lit->IsJSObject()) {
+  } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
     if (true_label_ != fall_through_) __ B(true_label_);
   } else if (lit->IsString()) {
     if (String::cast(*lit)->length() == 0) {
@@ -796,22 +798,11 @@
 
     case VariableLocation::LOOKUP: {
       Comment cmnt(masm_, "[ VariableDeclaration");
+      DCHECK_EQ(VAR, mode);
+      DCHECK(!hole_init);
       __ Mov(x2, Operand(variable->name()));
-      // Declaration nodes are always introduced in one of four modes.
-      DCHECK(IsDeclaredVariableMode(mode));
-      // Push initial value, if any.
-      // Note: For variables we must not push an initial value (such as
-      // 'undefined') because we may have a (legal) redeclaration and we
-      // must not destroy the current value.
-      if (hole_init) {
-        __ LoadRoot(x0, Heap::kTheHoleValueRootIndex);
-        __ Push(x2, x0);
-      } else {
-        // Pushing 0 (xzr) indicates no initial value.
-        __ Push(x2, xzr);
-      }
-      __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      __ CallRuntime(Runtime::kDeclareLookupSlot);
+      __ Push(x2);
+      __ CallRuntime(Runtime::kDeclareEvalVar);
       PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
@@ -868,8 +859,7 @@
       PushOperand(x2);
       // Push initial value for function declaration.
       VisitForStackValue(declaration->fun());
-      PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+      CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
       PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
@@ -1288,14 +1278,14 @@
 
 void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
                                                TypeofMode typeof_mode) {
+#ifdef DEBUG
   Variable* var = proxy->var();
   DCHECK(var->IsUnallocatedOrGlobalSlot() ||
          (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-  __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
-  __ Mov(LoadDescriptor::NameRegister(), Operand(var->name()));
-  __ Mov(LoadDescriptor::SlotRegister(),
+#endif
+  __ Mov(LoadGlobalDescriptor::SlotRegister(),
          SmiFromSlot(proxy->VariableFeedbackSlot()));
-  CallLoadIC(typeof_mode);
+  CallLoadGlobalIC(typeof_mode);
 }
 
 
@@ -1365,18 +1355,6 @@
 }
 
 
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
-  Comment cmnt(masm_, "[ RegExpLiteral");
-  __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ Mov(x2, Smi::FromInt(expr->literal_index()));
-  __ Mov(x1, Operand(expr->pattern()));
-  __ Mov(x0, Smi::FromInt(expr->flags()));
-  FastCloneRegExpStub stub(isolate());
-  __ CallStub(&stub);
-  context()->Plug(x0);
-}
-
-
 void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
   Expression* expression = (property == NULL) ? NULL : property->value();
   if (expression == NULL) {
@@ -1484,12 +1462,16 @@
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->getter = property;
+          AccessorTable::Iterator it = accessor_table.lookup(key);
+          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->setter = property;
+          AccessorTable::Iterator it = accessor_table.lookup(key);
+          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->setter = property;
         }
         break;
     }
@@ -1500,14 +1482,15 @@
   for (AccessorTable::Iterator it = accessor_table.begin();
        it != accessor_table.end();
        ++it) {
-      __ Peek(x10, 0);  // Duplicate receiver.
-      PushOperand(x10);
-      VisitForStackValue(it->first);
-      EmitAccessor(it->second->getter);
-      EmitAccessor(it->second->setter);
-      __ Mov(x10, Smi::FromInt(NONE));
-      PushOperand(x10);
-      CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+    __ Peek(x10, 0);  // Duplicate receiver.
+    PushOperand(x10);
+    VisitForStackValue(it->first);
+    EmitAccessor(it->second->getter);
+    EmitAccessor(it->second->setter);
+    __ Mov(x10, Smi::FromInt(NONE));
+    PushOperand(x10);
+    CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+    PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1553,6 +1536,8 @@
             PushOperand(Smi::FromInt(NONE));
             PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
             CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+            PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+                                   BailoutState::NO_REGISTERS);
           } else {
             DropOperands(3);
           }
@@ -2809,66 +2794,6 @@
 }
 
 
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(3, args->length());
-
-  Register string = x0;
-  Register index = x1;
-  Register value = x2;
-  Register scratch = x10;
-
-  VisitForStackValue(args->at(0));        // index
-  VisitForStackValue(args->at(1));        // value
-  VisitForAccumulatorValue(args->at(2));  // string
-  PopOperands(value, index);
-
-  if (FLAG_debug_code) {
-    __ AssertSmi(value, kNonSmiValue);
-    __ AssertSmi(index, kNonSmiIndex);
-    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
-    __ EmitSeqStringSetCharCheck(string, index, kIndexIsSmi, scratch,
-                                 one_byte_seq_type);
-  }
-
-  __ Add(scratch, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
-  __ SmiUntag(value);
-  __ SmiUntag(index);
-  __ Strb(value, MemOperand(scratch, index));
-  context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(3, args->length());
-
-  Register string = x0;
-  Register index = x1;
-  Register value = x2;
-  Register scratch = x10;
-
-  VisitForStackValue(args->at(0));        // index
-  VisitForStackValue(args->at(1));        // value
-  VisitForAccumulatorValue(args->at(2));  // string
-  PopOperands(value, index);
-
-  if (FLAG_debug_code) {
-    __ AssertSmi(value, kNonSmiValue);
-    __ AssertSmi(index, kNonSmiIndex);
-    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
-    __ EmitSeqStringSetCharCheck(string, index, kIndexIsSmi, scratch,
-                                 two_byte_seq_type);
-  }
-
-  __ Add(scratch, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
-  __ SmiUntag(value);
-  __ SmiUntag(index);
-  __ Strh(value, MemOperand(scratch, index, LSL, 1));
-  context()->Plug(string);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2907,13 +2832,8 @@
   Label need_conversion;
   Label index_out_of_range;
   Label done;
-  StringCharCodeAtGenerator generator(object,
-                                      index,
-                                      result,
-                                      &need_conversion,
-                                      &need_conversion,
-                                      &index_out_of_range,
-                                      STRING_INDEX_IS_NUMBER);
+  StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
+                                      &need_conversion, &index_out_of_range);
   generator.GenerateFast(masm_);
   __ B(&done);
 
@@ -2936,52 +2856,6 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-
-  Register object = x1;
-  Register index = x0;
-  Register result = x0;
-
-  PopOperand(object);
-
-  Label need_conversion;
-  Label index_out_of_range;
-  Label done;
-  StringCharAtGenerator generator(object,
-                                  index,
-                                  x3,
-                                  result,
-                                  &need_conversion,
-                                  &need_conversion,
-                                  &index_out_of_range,
-                                  STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm_);
-  __ B(&done);
-
-  __ Bind(&index_out_of_range);
-  // When the index is out of range, the spec requires us to return
-  // the empty string.
-  __ LoadRoot(result, Heap::kempty_stringRootIndex);
-  __ B(&done);
-
-  __ Bind(&need_conversion);
-  // Move smi zero into the result register, which will trigger conversion.
-  __ Mov(result, Smi::FromInt(0));
-  __ B(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
-  __ Bind(&done);
-  context()->Plug(result);
-}
-
-
 void FullCodeGenerator::EmitCall(CallRuntime* expr) {
   ASM_LOCATION("FullCodeGenerator::EmitCall");
   ZoneList<Expression*>* args = expr->arguments();
@@ -3359,8 +3233,7 @@
   }
 
   // Convert old value into a number.
-  ToNumberStub convert_stub(isolate());
-  __ CallStub(&convert_stub);
+  __ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
   PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
 
   // Save result for postfix expressions.
@@ -3701,7 +3574,7 @@
   // When we arrive here, x0 holds the generator object.
   __ RecordGeneratorContinuation();
   __ Ldr(x1, FieldMemOperand(x0, JSGeneratorObject::kResumeModeOffset));
-  __ Ldr(x0, FieldMemOperand(x0, JSGeneratorObject::kInputOffset));
+  __ Ldr(x0, FieldMemOperand(x0, JSGeneratorObject::kInputOrDebugPosOffset));
   STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
   STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
   __ Cmp(x1, Operand(Smi::FromInt(JSGeneratorObject::kReturn)));
diff --git a/src/full-codegen/full-codegen.cc b/src/full-codegen/full-codegen.cc
index 2d7ad32..03140c9 100644
--- a/src/full-codegen/full-codegen.cc
+++ b/src/full-codegen/full-codegen.cc
@@ -34,7 +34,8 @@
   TRACE_EVENT0("v8", "V8.CompileFullCode");
 
   Handle<Script> script = info->script();
-  if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+  if (!script->IsUndefined(isolate) &&
+      !script->source()->IsUndefined(isolate)) {
     int len = String::cast(script->source())->length();
     isolate->counters()->total_full_codegen_source_size()->Increment(len);
   }
@@ -169,13 +170,16 @@
   PrepareForBailoutForId(node->id(), state);
 }
 
-
-void FullCodeGenerator::CallLoadIC(TypeofMode typeof_mode,
-                                   TypeFeedbackId id) {
-  Handle<Code> ic = CodeFactory::LoadIC(isolate(), typeof_mode).code();
+void FullCodeGenerator::CallLoadIC(TypeFeedbackId id) {
+  Handle<Code> ic = CodeFactory::LoadIC(isolate()).code();
   CallIC(ic, id);
 }
 
+void FullCodeGenerator::CallLoadGlobalIC(TypeofMode typeof_mode,
+                                         TypeFeedbackId id) {
+  Handle<Code> ic = CodeFactory::LoadGlobalIC(isolate(), typeof_mode).code();
+  CallIC(ic, id);
+}
 
 void FullCodeGenerator::CallStoreIC(TypeFeedbackId id) {
   Handle<Code> ic = CodeFactory::StoreIC(isolate(), language_mode()).code();
@@ -611,18 +615,14 @@
   RestoreContext();
 }
 
-bool RecordStatementPosition(MacroAssembler* masm, int pos) {
-  if (pos == RelocInfo::kNoPosition) return false;
+void RecordStatementPosition(MacroAssembler* masm, int pos) {
+  if (pos == RelocInfo::kNoPosition) return;
   masm->positions_recorder()->RecordStatementPosition(pos);
-  masm->positions_recorder()->RecordPosition(pos);
-  return masm->positions_recorder()->WriteRecordedPositions();
 }
 
-
-bool RecordPosition(MacroAssembler* masm, int pos) {
-  if (pos == RelocInfo::kNoPosition) return false;
+void RecordPosition(MacroAssembler* masm, int pos) {
+  if (pos == RelocInfo::kNoPosition) return;
   masm->positions_recorder()->RecordPosition(pos);
-  return masm->positions_recorder()->WriteRecordedPositions();
 }
 
 
@@ -646,8 +646,8 @@
 void FullCodeGenerator::SetStatementPosition(
     Statement* stmt, FullCodeGenerator::InsertBreak insert_break) {
   if (stmt->position() == RelocInfo::kNoPosition) return;
-  bool recorded = RecordStatementPosition(masm_, stmt->position());
-  if (recorded && insert_break == INSERT_BREAK && info_->is_debug() &&
+  RecordStatementPosition(masm_, stmt->position());
+  if (insert_break == INSERT_BREAK && info_->is_debug() &&
       !stmt->IsDebuggerStatement()) {
     DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION);
   }
@@ -661,8 +661,8 @@
 
 void FullCodeGenerator::SetExpressionAsStatementPosition(Expression* expr) {
   if (expr->position() == RelocInfo::kNoPosition) return;
-  bool recorded = RecordStatementPosition(masm_, expr->position());
-  if (recorded && info_->is_debug()) {
+  RecordStatementPosition(masm_, expr->position());
+  if (info_->is_debug()) {
     DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION);
   }
 }
@@ -1032,17 +1032,12 @@
 
 void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
                                        bool pretenure) {
-  // Use the fast case closure allocation code that allocates in new
-  // space for nested functions that don't need literals cloning. If
-  // we're running with the --always-opt or the --prepare-always-opt
+  // If we're running with the --always-opt or the --prepare-always-opt
   // flag, we need to use the runtime function so that the new function
   // we are creating here gets a chance to have its code optimized and
   // doesn't just get a copy of the existing unoptimized code.
-  if (!FLAG_always_opt &&
-      !FLAG_prepare_always_opt &&
-      !pretenure &&
-      scope()->is_function_scope() &&
-      info->num_literals() == 0) {
+  if (!FLAG_always_opt && !FLAG_prepare_always_opt && !pretenure &&
+      scope()->is_function_scope()) {
     FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
     __ Move(stub.GetCallInterfaceDescriptor().GetRegisterParameter(0), info);
     __ CallStub(&stub);
@@ -1063,7 +1058,7 @@
   __ Move(LoadDescriptor::NameRegister(), key->value());
   __ Move(LoadDescriptor::SlotRegister(),
           SmiFromSlot(prop->PropertyFeedbackSlot()));
-  CallLoadIC(NOT_INSIDE_TYPEOF);
+  CallLoadIC();
 }
 
 void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
@@ -1526,7 +1521,7 @@
     __ LoadRoot(LoadDescriptor::NameRegister(),
                 Heap::kprototype_stringRootIndex);
     __ Move(LoadDescriptor::SlotRegister(), SmiFromSlot(lit->PrototypeSlot()));
-    CallLoadIC(NOT_INSIDE_TYPEOF);
+    CallLoadIC();
     PrepareForBailoutForId(lit->PrototypeId(), BailoutState::TOS_REGISTER);
     PushOperand(result_register());
 
@@ -1545,6 +1540,19 @@
   context()->Plug(result_register());
 }
 
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+  Comment cmnt(masm_, "[ RegExpLiteral");
+  Callable callable = CodeFactory::FastCloneRegExp(isolate());
+  CallInterfaceDescriptor descriptor = callable.descriptor();
+  LoadFromFrameField(JavaScriptFrameConstants::kFunctionOffset,
+                     descriptor.GetRegisterParameter(0));
+  __ Move(descriptor.GetRegisterParameter(1),
+          Smi::FromInt(expr->literal_index()));
+  __ Move(descriptor.GetRegisterParameter(2), expr->pattern());
+  __ Move(descriptor.GetRegisterParameter(3), Smi::FromInt(expr->flags()));
+  __ Call(callable.code(), RelocInfo::CODE_TARGET);
+  context()->Plug(result_register());
+}
 
 void FullCodeGenerator::VisitNativeFunctionLiteral(
     NativeFunctionLiteral* expr) {
diff --git a/src/full-codegen/full-codegen.h b/src/full-codegen/full-codegen.h
index 0a004a8..87367ca 100644
--- a/src/full-codegen/full-codegen.h
+++ b/src/full-codegen/full-codegen.h
@@ -512,9 +512,6 @@
   F(NewObject)                          \
   F(ValueOf)                            \
   F(StringCharFromCode)                 \
-  F(StringCharAt)                       \
-  F(OneByteSeqStringSetChar)            \
-  F(TwoByteSeqStringSetChar)            \
   F(IsJSReceiver)                       \
   F(MathPow)                            \
   F(HasCachedArrayIndex)                \
@@ -657,9 +654,10 @@
   void CallIC(Handle<Code> code,
               TypeFeedbackId id = TypeFeedbackId::None());
 
+  void CallLoadIC(TypeFeedbackId id = TypeFeedbackId::None());
   // Inside typeof reference errors are never thrown.
-  void CallLoadIC(TypeofMode typeof_mode,
-                  TypeFeedbackId id = TypeFeedbackId::None());
+  void CallLoadGlobalIC(TypeofMode typeof_mode,
+                        TypeFeedbackId id = TypeFeedbackId::None());
   void CallStoreIC(TypeFeedbackId id = TypeFeedbackId::None());
 
   void SetFunctionPosition(FunctionLiteral* fun);
diff --git a/src/full-codegen/ia32/full-codegen-ia32.cc b/src/full-codegen/ia32/full-codegen-ia32.cc
index 760a818..c0f8396 100644
--- a/src/full-codegen/ia32/full-codegen-ia32.cc
+++ b/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -490,10 +490,12 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
-  if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+  DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
+         !lit->IsUndetectable());
+  if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
+      lit->IsFalse(isolate())) {
     if (false_label_ != fall_through_) __ jmp(false_label_);
-  } else if (lit->IsTrue() || lit->IsJSObject()) {
+  } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
     if (true_label_ != fall_through_) __ jmp(true_label_);
   } else if (lit->IsString()) {
     if (String::cast(*lit)->length() == 0) {
@@ -746,21 +748,10 @@
 
     case VariableLocation::LOOKUP: {
       Comment cmnt(masm_, "[ VariableDeclaration");
+      DCHECK_EQ(VAR, mode);
+      DCHECK(!hole_init);
       __ push(Immediate(variable->name()));
-      // VariableDeclaration nodes are always introduced in one of four modes.
-      DCHECK(IsDeclaredVariableMode(mode));
-      // Push initial value, if any.
-      // Note: For variables we must not push an initial value (such as
-      // 'undefined') because we may have a (legal) redeclaration and we
-      // must not destroy the current value.
-      if (hole_init) {
-        __ push(Immediate(isolate()->factory()->the_hole_value()));
-      } else {
-        __ push(Immediate(Smi::FromInt(0)));  // Indicates no initial value.
-      }
-      __ push(
-          Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
-      __ CallRuntime(Runtime::kDeclareLookupSlot);
+      __ CallRuntime(Runtime::kDeclareEvalVar);
       PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
@@ -813,8 +804,7 @@
       Comment cmnt(masm_, "[ FunctionDeclaration");
       PushOperand(variable->name());
       VisitForStackValue(declaration->fun());
-      PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+      CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
       PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
@@ -1226,17 +1216,14 @@
 
 void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
                                                TypeofMode typeof_mode) {
+#ifdef DEBUG
   Variable* var = proxy->var();
   DCHECK(var->IsUnallocatedOrGlobalSlot() ||
          (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-  __ mov(LoadDescriptor::ReceiverRegister(), NativeContextOperand());
-  __ mov(LoadDescriptor::ReceiverRegister(),
-         ContextOperand(LoadDescriptor::ReceiverRegister(),
-                        Context::EXTENSION_INDEX));
-  __ mov(LoadDescriptor::NameRegister(), var->name());
-  __ mov(LoadDescriptor::SlotRegister(),
+#endif
+  __ mov(LoadGlobalDescriptor::SlotRegister(),
          Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
-  CallLoadIC(typeof_mode);
+  CallLoadGlobalIC(typeof_mode);
 }
 
 
@@ -1305,18 +1292,6 @@
 }
 
 
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
-  Comment cmnt(masm_, "[ RegExpLiteral");
-  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  __ Move(eax, Immediate(Smi::FromInt(expr->literal_index())));
-  __ Move(ecx, Immediate(expr->pattern()));
-  __ Move(edx, Immediate(Smi::FromInt(expr->flags())));
-  FastCloneRegExpStub stub(isolate());
-  __ CallStub(&stub);
-  context()->Plug(eax);
-}
-
-
 void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
   Expression* expression = (property == NULL) ? NULL : property->value();
   if (expression == NULL) {
@@ -1423,12 +1398,16 @@
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->getter = property;
+          AccessorTable::Iterator it = accessor_table.lookup(key);
+          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->setter = property;
+          AccessorTable::Iterator it = accessor_table.lookup(key);
+          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->setter = property;
         }
         break;
     }
@@ -1447,6 +1426,7 @@
 
     PushOperand(Smi::FromInt(NONE));
     CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+    PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1491,6 +1471,8 @@
             PushOperand(Smi::FromInt(NONE));
             PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
             CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+            PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+                                   BailoutState::NO_REGISTERS);
           } else {
             DropOperands(3);
           }
@@ -1766,7 +1748,7 @@
   // When we arrive here, eax holds the generator object.
   __ RecordGeneratorContinuation();
   __ mov(ebx, FieldOperand(eax, JSGeneratorObject::kResumeModeOffset));
-  __ mov(eax, FieldOperand(eax, JSGeneratorObject::kInputOffset));
+  __ mov(eax, FieldOperand(eax, JSGeneratorObject::kInputOrDebugPosOffset));
   STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
   STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
   __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::kReturn)));
@@ -2791,75 +2773,6 @@
 }
 
 
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(3, args->length());
-
-  Register string = eax;
-  Register index = ebx;
-  Register value = ecx;
-
-  VisitForStackValue(args->at(0));        // index
-  VisitForStackValue(args->at(1));        // value
-  VisitForAccumulatorValue(args->at(2));  // string
-
-  PopOperand(value);
-  PopOperand(index);
-
-  if (FLAG_debug_code) {
-    __ test(value, Immediate(kSmiTagMask));
-    __ Check(zero, kNonSmiValue);
-    __ test(index, Immediate(kSmiTagMask));
-    __ Check(zero, kNonSmiValue);
-  }
-
-  __ SmiUntag(value);
-  __ SmiUntag(index);
-
-  if (FLAG_debug_code) {
-    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
-    __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
-  }
-
-  __ mov_b(FieldOperand(string, index, times_1, SeqOneByteString::kHeaderSize),
-           value);
-  context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(3, args->length());
-
-  Register string = eax;
-  Register index = ebx;
-  Register value = ecx;
-
-  VisitForStackValue(args->at(0));        // index
-  VisitForStackValue(args->at(1));        // value
-  VisitForAccumulatorValue(args->at(2));  // string
-  PopOperand(value);
-  PopOperand(index);
-
-  if (FLAG_debug_code) {
-    __ test(value, Immediate(kSmiTagMask));
-    __ Check(zero, kNonSmiValue);
-    __ test(index, Immediate(kSmiTagMask));
-    __ Check(zero, kNonSmiValue);
-    __ SmiUntag(index);
-    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
-    __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
-    __ SmiTag(index);
-  }
-
-  __ SmiUntag(value);
-  // No need to untag a smi for two-byte addressing.
-  __ mov_w(FieldOperand(string, index, times_1, SeqTwoByteString::kHeaderSize),
-           value);
-  context()->Plug(string);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2895,13 +2808,8 @@
   Label need_conversion;
   Label index_out_of_range;
   Label done;
-  StringCharCodeAtGenerator generator(object,
-                                      index,
-                                      result,
-                                      &need_conversion,
-                                      &need_conversion,
-                                      &index_out_of_range,
-                                      STRING_INDEX_IS_NUMBER);
+  StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
+                                      &need_conversion, &index_out_of_range);
   generator.GenerateFast(masm_);
   __ jmp(&done);
 
@@ -2925,54 +2833,6 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-
-  Register object = ebx;
-  Register index = eax;
-  Register scratch = edx;
-  Register result = eax;
-
-  PopOperand(object);
-
-  Label need_conversion;
-  Label index_out_of_range;
-  Label done;
-  StringCharAtGenerator generator(object,
-                                  index,
-                                  scratch,
-                                  result,
-                                  &need_conversion,
-                                  &need_conversion,
-                                  &index_out_of_range,
-                                  STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm_);
-  __ jmp(&done);
-
-  __ bind(&index_out_of_range);
-  // When the index is out of range, the spec requires us to return
-  // the empty string.
-  __ Move(result, Immediate(isolate()->factory()->empty_string()));
-  __ jmp(&done);
-
-  __ bind(&need_conversion);
-  // Move smi zero into the result register, which will trigger
-  // conversion.
-  __ Move(result, Immediate(Smi::FromInt(0)));
-  __ jmp(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
-  __ bind(&done);
-  context()->Plug(result);
-}
-
-
 void FullCodeGenerator::EmitCall(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_LE(2, args->length());
@@ -3351,8 +3211,7 @@
   }
 
   // Convert old value into a number.
-  ToNumberStub convert_stub(isolate());
-  __ CallStub(&convert_stub);
+  __ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
   PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
 
   // Save result for postfix expressions.
diff --git a/src/full-codegen/mips/full-codegen-mips.cc b/src/full-codegen/mips/full-codegen-mips.cc
index e61c3e4..014aaf6 100644
--- a/src/full-codegen/mips/full-codegen-mips.cc
+++ b/src/full-codegen/mips/full-codegen-mips.cc
@@ -531,10 +531,12 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
-  if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+  DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
+         !lit->IsUndetectable());
+  if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
+      lit->IsFalse(isolate())) {
     if (false_label_ != fall_through_) __ Branch(false_label_);
-  } else if (lit->IsTrue() || lit->IsJSObject()) {
+  } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
     if (true_label_ != fall_through_) __ Branch(true_label_);
   } else if (lit->IsString()) {
     if (String::cast(*lit)->length() == 0) {
@@ -796,22 +798,11 @@
 
     case VariableLocation::LOOKUP: {
       Comment cmnt(masm_, "[ VariableDeclaration");
+      DCHECK_EQ(VAR, mode);
+      DCHECK(!hole_init);
       __ li(a2, Operand(variable->name()));
-      // Declaration nodes are always introduced in one of four modes.
-      DCHECK(IsDeclaredVariableMode(mode));
-      // Push initial value, if any.
-      // Note: For variables we must not push an initial value (such as
-      // 'undefined') because we may have a (legal) redeclaration and we
-      // must not destroy the current value.
-      if (hole_init) {
-        __ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
-      } else {
-        DCHECK(Smi::FromInt(0) == 0);
-        __ mov(a0, zero_reg);  // Smi::FromInt(0) indicates no initial value.
-      }
-      __ Push(a2, a0);
-      __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      __ CallRuntime(Runtime::kDeclareLookupSlot);
+      __ Push(a2);
+      __ CallRuntime(Runtime::kDeclareEvalVar);
       PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
@@ -868,8 +859,7 @@
       PushOperand(a2);
       // Push initial value for function declaration.
       VisitForStackValue(declaration->fun());
-      PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+      CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
       PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
@@ -1298,14 +1288,14 @@
 
 void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
                                                TypeofMode typeof_mode) {
+#ifdef DEBUG
   Variable* var = proxy->var();
   DCHECK(var->IsUnallocatedOrGlobalSlot() ||
          (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-  __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
-  __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
-  __ li(LoadDescriptor::SlotRegister(),
+#endif
+  __ li(LoadGlobalDescriptor::SlotRegister(),
         Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
-  CallLoadIC(typeof_mode);
+  CallLoadGlobalIC(typeof_mode);
 }
 
 
@@ -1375,18 +1365,6 @@
 }
 
 
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
-  Comment cmnt(masm_, "[ RegExpLiteral");
-  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
-  __ li(a1, Operand(expr->pattern()));
-  __ li(a0, Operand(Smi::FromInt(expr->flags())));
-  FastCloneRegExpStub stub(isolate());
-  __ CallStub(&stub);
-  context()->Plug(v0);
-}
-
-
 void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
   Expression* expression = (property == NULL) ? NULL : property->value();
   if (expression == NULL) {
@@ -1495,12 +1473,16 @@
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->getter = property;
+          AccessorTable::Iterator it = accessor_table.lookup(key);
+          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->setter = property;
+          AccessorTable::Iterator it = accessor_table.lookup(key);
+          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->setter = property;
         }
         break;
     }
@@ -1519,6 +1501,7 @@
     __ li(a0, Operand(Smi::FromInt(NONE)));
     PushOperand(a0);
     CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+    PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1564,6 +1547,8 @@
             PushOperand(Smi::FromInt(NONE));
             PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
             CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+            PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+                                   BailoutState::NO_REGISTERS);
           } else {
             DropOperands(3);
           }
@@ -1845,7 +1830,7 @@
   // When we arrive here, v0 holds the generator object.
   __ RecordGeneratorContinuation();
   __ lw(a1, FieldMemOperand(v0, JSGeneratorObject::kResumeModeOffset));
-  __ lw(v0, FieldMemOperand(v0, JSGeneratorObject::kInputOffset));
+  __ lw(v0, FieldMemOperand(v0, JSGeneratorObject::kInputOrDebugPosOffset));
   __ Branch(&resume, eq, a1, Operand(Smi::FromInt(JSGeneratorObject::kNext)));
   __ Push(result_register());
   __ Branch(&exception, eq, a1,
@@ -2908,80 +2893,6 @@
 }
 
 
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(3, args->length());
-
-  Register string = v0;
-  Register index = a1;
-  Register value = a2;
-
-  VisitForStackValue(args->at(0));        // index
-  VisitForStackValue(args->at(1));        // value
-  VisitForAccumulatorValue(args->at(2));  // string
-  PopOperands(index, value);
-
-  if (FLAG_debug_code) {
-    __ SmiTst(value, at);
-    __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
-    __ SmiTst(index, at);
-    __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
-    __ SmiUntag(index, index);
-    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
-    Register scratch = t5;
-    __ EmitSeqStringSetCharCheck(
-        string, index, value, scratch, one_byte_seq_type);
-    __ SmiTag(index, index);
-  }
-
-  __ SmiUntag(value, value);
-  __ Addu(at,
-          string,
-          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  __ SmiUntag(index);
-  __ Addu(at, at, index);
-  __ sb(value, MemOperand(at));
-  context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(3, args->length());
-
-  Register string = v0;
-  Register index = a1;
-  Register value = a2;
-
-  VisitForStackValue(args->at(0));        // index
-  VisitForStackValue(args->at(1));        // value
-  VisitForAccumulatorValue(args->at(2));  // string
-  PopOperands(index, value);
-
-  if (FLAG_debug_code) {
-    __ SmiTst(value, at);
-    __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
-    __ SmiTst(index, at);
-    __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
-    __ SmiUntag(index, index);
-    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
-    Register scratch = t5;
-    __ EmitSeqStringSetCharCheck(
-        string, index, value, scratch, two_byte_seq_type);
-    __ SmiTag(index, index);
-  }
-
-  __ SmiUntag(value, value);
-  __ Addu(at,
-          string,
-          Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  __ Addu(at, at, index);
-  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  __ sh(value, MemOperand(at));
-    context()->Plug(string);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3018,13 +2929,8 @@
   Label need_conversion;
   Label index_out_of_range;
   Label done;
-  StringCharCodeAtGenerator generator(object,
-                                      index,
-                                      result,
-                                      &need_conversion,
-                                      &need_conversion,
-                                      &index_out_of_range,
-                                      STRING_INDEX_IS_NUMBER);
+  StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
+                                      &need_conversion, &index_out_of_range);
   generator.GenerateFast(masm_);
   __ jmp(&done);
 
@@ -3048,55 +2954,6 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-  __ mov(a0, result_register());
-
-  Register object = a1;
-  Register index = a0;
-  Register scratch = a3;
-  Register result = v0;
-
-  PopOperand(object);
-
-  Label need_conversion;
-  Label index_out_of_range;
-  Label done;
-  StringCharAtGenerator generator(object,
-                                  index,
-                                  scratch,
-                                  result,
-                                  &need_conversion,
-                                  &need_conversion,
-                                  &index_out_of_range,
-                                  STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm_);
-  __ jmp(&done);
-
-  __ bind(&index_out_of_range);
-  // When the index is out of range, the spec requires us to return
-  // the empty string.
-  __ LoadRoot(result, Heap::kempty_stringRootIndex);
-  __ jmp(&done);
-
-  __ bind(&need_conversion);
-  // Move smi zero into the result register, which will trigger
-  // conversion.
-  __ li(result, Operand(Smi::FromInt(0)));
-  __ jmp(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
-  __ bind(&done);
-  context()->Plug(result);
-}
-
-
 void FullCodeGenerator::EmitCall(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_LE(2, args->length());
@@ -3466,8 +3323,7 @@
   }
 
   // Convert old value into a number.
-  ToNumberStub convert_stub(isolate());
-  __ CallStub(&convert_stub);
+  __ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
   PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
 
   // Save result for postfix expressions.
diff --git a/src/full-codegen/mips64/full-codegen-mips64.cc b/src/full-codegen/mips64/full-codegen-mips64.cc
index a93489d..a58f173 100644
--- a/src/full-codegen/mips64/full-codegen-mips64.cc
+++ b/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -530,10 +530,12 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
-  if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+  DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
+         !lit->IsUndetectable());
+  if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
+      lit->IsFalse(isolate())) {
     if (false_label_ != fall_through_) __ Branch(false_label_);
-  } else if (lit->IsTrue() || lit->IsJSObject()) {
+  } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
     if (true_label_ != fall_through_) __ Branch(true_label_);
   } else if (lit->IsString()) {
     if (String::cast(*lit)->length() == 0) {
@@ -795,22 +797,11 @@
 
     case VariableLocation::LOOKUP: {
       Comment cmnt(masm_, "[ VariableDeclaration");
+      DCHECK_EQ(VAR, mode);
+      DCHECK(!hole_init);
       __ li(a2, Operand(variable->name()));
-      // Declaration nodes are always introduced in one of four modes.
-      DCHECK(IsDeclaredVariableMode(mode));
-      // Push initial value, if any.
-      // Note: For variables we must not push an initial value (such as
-      // 'undefined') because we may have a (legal) redeclaration and we
-      // must not destroy the current value.
-      if (hole_init) {
-        __ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
-      } else {
-        DCHECK(Smi::FromInt(0) == 0);
-        __ mov(a0, zero_reg);  // Smi::FromInt(0) indicates no initial value.
-      }
-      __ Push(a2, a0);
-      __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      __ CallRuntime(Runtime::kDeclareLookupSlot);
+      __ Push(a2);
+      __ CallRuntime(Runtime::kDeclareEvalVar);
       PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
@@ -867,8 +858,7 @@
       PushOperand(a2);
       // Push initial value for function declaration.
       VisitForStackValue(declaration->fun());
-      PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+      CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
       PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
@@ -1299,14 +1289,14 @@
 
 void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
                                                TypeofMode typeof_mode) {
+#ifdef DEBUG
   Variable* var = proxy->var();
   DCHECK(var->IsUnallocatedOrGlobalSlot() ||
          (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-  __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
-  __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
-  __ li(LoadDescriptor::SlotRegister(),
+#endif
+  __ li(LoadGlobalDescriptor::SlotRegister(),
         Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
-  CallLoadIC(typeof_mode);
+  CallLoadGlobalIC(typeof_mode);
 }
 
 
@@ -1376,18 +1366,6 @@
 }
 
 
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
-  Comment cmnt(masm_, "[ RegExpLiteral");
-  __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
-  __ li(a1, Operand(expr->pattern()));
-  __ li(a0, Operand(Smi::FromInt(expr->flags())));
-  FastCloneRegExpStub stub(isolate());
-  __ CallStub(&stub);
-  context()->Plug(v0);
-}
-
-
 void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
   Expression* expression = (property == NULL) ? NULL : property->value();
   if (expression == NULL) {
@@ -1496,12 +1474,16 @@
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->getter = property;
+          AccessorTable::Iterator it = accessor_table.lookup(key);
+          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->setter = property;
+          AccessorTable::Iterator it = accessor_table.lookup(key);
+          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->setter = property;
         }
         break;
     }
@@ -1520,6 +1502,7 @@
     __ li(a0, Operand(Smi::FromInt(NONE)));
     PushOperand(a0);
     CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+    PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1565,6 +1548,8 @@
             PushOperand(Smi::FromInt(NONE));
             PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
             CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+            PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+                                   BailoutState::NO_REGISTERS);
           } else {
             DropOperands(3);
           }
@@ -1846,7 +1831,7 @@
   // When we arrive here, v0 holds the generator object.
   __ RecordGeneratorContinuation();
   __ ld(a1, FieldMemOperand(v0, JSGeneratorObject::kResumeModeOffset));
-  __ ld(v0, FieldMemOperand(v0, JSGeneratorObject::kInputOffset));
+  __ ld(v0, FieldMemOperand(v0, JSGeneratorObject::kInputOrDebugPosOffset));
   __ Branch(&resume, eq, a1, Operand(Smi::FromInt(JSGeneratorObject::kNext)));
   __ Push(result_register());
   __ Branch(&exception, eq, a1,
@@ -2907,81 +2892,6 @@
 }
 
 
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(3, args->length());
-
-  Register string = v0;
-  Register index = a1;
-  Register value = a2;
-
-  VisitForStackValue(args->at(0));        // index
-  VisitForStackValue(args->at(1));        // value
-  VisitForAccumulatorValue(args->at(2));  // string
-  PopOperands(index, value);
-
-  if (FLAG_debug_code) {
-    __ SmiTst(value, at);
-    __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
-    __ SmiTst(index, at);
-    __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
-    __ SmiUntag(index, index);
-    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
-    Register scratch = t1;
-    __ EmitSeqStringSetCharCheck(
-        string, index, value, scratch, one_byte_seq_type);
-    __ SmiTag(index, index);
-  }
-
-  __ SmiUntag(value, value);
-  __ Daddu(at,
-          string,
-          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  __ SmiUntag(index);
-  __ Daddu(at, at, index);
-  __ sb(value, MemOperand(at));
-  context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(3, args->length());
-
-  Register string = v0;
-  Register index = a1;
-  Register value = a2;
-
-  VisitForStackValue(args->at(0));        // index
-  VisitForStackValue(args->at(1));        // value
-  VisitForAccumulatorValue(args->at(2));  // string
-  PopOperands(index, value);
-
-  if (FLAG_debug_code) {
-    __ SmiTst(value, at);
-    __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
-    __ SmiTst(index, at);
-    __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
-    __ SmiUntag(index, index);
-    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
-    Register scratch = t1;
-    __ EmitSeqStringSetCharCheck(
-        string, index, value, scratch, two_byte_seq_type);
-    __ SmiTag(index, index);
-  }
-
-  __ SmiUntag(value, value);
-  __ Daddu(at,
-          string,
-          Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  __ dsra(index, index, 32 - 1);
-  __ Daddu(at, at, index);
-  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  __ sh(value, MemOperand(at));
-    context()->Plug(string);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3018,13 +2928,8 @@
   Label need_conversion;
   Label index_out_of_range;
   Label done;
-  StringCharCodeAtGenerator generator(object,
-                                      index,
-                                      result,
-                                      &need_conversion,
-                                      &need_conversion,
-                                      &index_out_of_range,
-                                      STRING_INDEX_IS_NUMBER);
+  StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
+                                      &need_conversion, &index_out_of_range);
   generator.GenerateFast(masm_);
   __ jmp(&done);
 
@@ -3048,55 +2953,6 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-  __ mov(a0, result_register());
-
-  Register object = a1;
-  Register index = a0;
-  Register scratch = a3;
-  Register result = v0;
-
-  PopOperand(object);
-
-  Label need_conversion;
-  Label index_out_of_range;
-  Label done;
-  StringCharAtGenerator generator(object,
-                                  index,
-                                  scratch,
-                                  result,
-                                  &need_conversion,
-                                  &need_conversion,
-                                  &index_out_of_range,
-                                  STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm_);
-  __ jmp(&done);
-
-  __ bind(&index_out_of_range);
-  // When the index is out of range, the spec requires us to return
-  // the empty string.
-  __ LoadRoot(result, Heap::kempty_stringRootIndex);
-  __ jmp(&done);
-
-  __ bind(&need_conversion);
-  // Move smi zero into the result register, which will trigger
-  // conversion.
-  __ li(result, Operand(Smi::FromInt(0)));
-  __ jmp(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
-  __ bind(&done);
-  context()->Plug(result);
-}
-
-
 void FullCodeGenerator::EmitCall(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_LE(2, args->length());
@@ -3467,8 +3323,7 @@
   }
 
   // Convert old value into a number.
-  ToNumberStub convert_stub(isolate());
-  __ CallStub(&convert_stub);
+  __ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
   PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
 
   // Save result for postfix expressions.
diff --git a/src/full-codegen/ppc/OWNERS b/src/full-codegen/ppc/OWNERS
index eb007cb..752e8e3 100644
--- a/src/full-codegen/ppc/OWNERS
+++ b/src/full-codegen/ppc/OWNERS
@@ -3,3 +3,4 @@
 joransiu@ca.ibm.com
 mbrandy@us.ibm.com
 michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/src/full-codegen/ppc/full-codegen-ppc.cc b/src/full-codegen/ppc/full-codegen-ppc.cc
index 50248c1..1f47983 100644
--- a/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -516,10 +516,12 @@
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
   codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
-  if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+  DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
+         !lit->IsUndetectable());
+  if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
+      lit->IsFalse(isolate())) {
     if (false_label_ != fall_through_) __ b(false_label_);
-  } else if (lit->IsTrue() || lit->IsJSObject()) {
+  } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
     if (true_label_ != fall_through_) __ b(true_label_);
   } else if (lit->IsString()) {
     if (String::cast(*lit)->length() == 0) {
@@ -761,21 +763,11 @@
 
     case VariableLocation::LOOKUP: {
       Comment cmnt(masm_, "[ VariableDeclaration");
+      DCHECK_EQ(VAR, mode);
+      DCHECK(!hole_init);
       __ mov(r5, Operand(variable->name()));
-      // Declaration nodes are always introduced in one of four modes.
-      DCHECK(IsDeclaredVariableMode(mode));
-      // Push initial value, if any.
-      // Note: For variables we must not push an initial value (such as
-      // 'undefined') because we may have a (legal) redeclaration and we
-      // must not destroy the current value.
-      if (hole_init) {
-        __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
-      } else {
-        __ LoadSmiLiteral(r3, Smi::FromInt(0));  // Indicates no initial value.
-      }
-      __ Push(r5, r3);
-      __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      __ CallRuntime(Runtime::kDeclareLookupSlot);
+      __ Push(r5);
+      __ CallRuntime(Runtime::kDeclareEvalVar);
       PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
@@ -828,8 +820,7 @@
       PushOperand(r5);
       // Push initial value for function declaration.
       VisitForStackValue(declaration->fun());
-      PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+      CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
       PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
@@ -1265,14 +1256,14 @@
 
 void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
                                                TypeofMode typeof_mode) {
+#ifdef DEBUG
   Variable* var = proxy->var();
   DCHECK(var->IsUnallocatedOrGlobalSlot() ||
          (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-  __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
-  __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
-  __ mov(LoadDescriptor::SlotRegister(),
+#endif
+  __ mov(LoadGlobalDescriptor::SlotRegister(),
          Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
-  CallLoadIC(typeof_mode);
+  CallLoadGlobalIC(typeof_mode);
 }
 
 
@@ -1341,18 +1332,6 @@
 }
 
 
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
-  Comment cmnt(masm_, "[ RegExpLiteral");
-  __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
-  __ mov(r4, Operand(expr->pattern()));
-  __ LoadSmiLiteral(r3, Smi::FromInt(expr->flags()));
-  FastCloneRegExpStub stub(isolate());
-  __ CallStub(&stub);
-  context()->Plug(r3);
-}
-
-
 void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
   Expression* expression = (property == NULL) ? NULL : property->value();
   if (expression == NULL) {
@@ -1461,12 +1440,16 @@
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->getter = property;
+          AccessorTable::Iterator it = accessor_table.lookup(key);
+          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->setter = property;
+          AccessorTable::Iterator it = accessor_table.lookup(key);
+          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->setter = property;
         }
         break;
     }
@@ -1484,6 +1467,7 @@
     __ LoadSmiLiteral(r3, Smi::FromInt(NONE));
     PushOperand(r3);
     CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+    PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1529,6 +1513,8 @@
             PushOperand(Smi::FromInt(NONE));
             PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
             CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+            PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+                                   BailoutState::NO_REGISTERS);
           } else {
             DropOperands(3);
           }
@@ -1807,7 +1793,7 @@
   // When we arrive here, r3 holds the generator object.
   __ RecordGeneratorContinuation();
   __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kResumeModeOffset));
-  __ LoadP(r3, FieldMemOperand(r3, JSGeneratorObject::kInputOffset));
+  __ LoadP(r3, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset));
   STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
   STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
   __ CmpSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::kReturn), r0);
@@ -2898,70 +2884,6 @@
 }
 
 
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(3, args->length());
-
-  Register string = r3;
-  Register index = r4;
-  Register value = r5;
-
-  VisitForStackValue(args->at(0));        // index
-  VisitForStackValue(args->at(1));        // value
-  VisitForAccumulatorValue(args->at(2));  // string
-  PopOperands(index, value);
-
-  if (FLAG_debug_code) {
-    __ TestIfSmi(value, r0);
-    __ Check(eq, kNonSmiValue, cr0);
-    __ TestIfSmi(index, r0);
-    __ Check(eq, kNonSmiIndex, cr0);
-    __ SmiUntag(index, index);
-    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
-    __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
-    __ SmiTag(index, index);
-  }
-
-  __ SmiUntag(value);
-  __ addi(ip, string, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  __ SmiToByteArrayOffset(r0, index);
-  __ stbx(value, MemOperand(ip, r0));
-  context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(3, args->length());
-
-  Register string = r3;
-  Register index = r4;
-  Register value = r5;
-
-  VisitForStackValue(args->at(0));        // index
-  VisitForStackValue(args->at(1));        // value
-  VisitForAccumulatorValue(args->at(2));  // string
-  PopOperands(index, value);
-
-  if (FLAG_debug_code) {
-    __ TestIfSmi(value, r0);
-    __ Check(eq, kNonSmiValue, cr0);
-    __ TestIfSmi(index, r0);
-    __ Check(eq, kNonSmiIndex, cr0);
-    __ SmiUntag(index, index);
-    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
-    __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
-    __ SmiTag(index, index);
-  }
-
-  __ SmiUntag(value);
-  __ addi(ip, string, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  __ SmiToShortArrayOffset(r0, index);
-  __ sthx(value, MemOperand(ip, r0));
-  context()->Plug(string);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2996,8 +2918,7 @@
   Label index_out_of_range;
   Label done;
   StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
-                                      &need_conversion, &index_out_of_range,
-                                      STRING_INDEX_IS_NUMBER);
+                                      &need_conversion, &index_out_of_range);
   generator.GenerateFast(masm_);
   __ b(&done);
 
@@ -3021,48 +2942,6 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-
-  Register object = r4;
-  Register index = r3;
-  Register scratch = r6;
-  Register result = r3;
-
-  PopOperand(object);
-
-  Label need_conversion;
-  Label index_out_of_range;
-  Label done;
-  StringCharAtGenerator generator(object, index, scratch, result,
-                                  &need_conversion, &need_conversion,
-                                  &index_out_of_range, STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm_);
-  __ b(&done);
-
-  __ bind(&index_out_of_range);
-  // When the index is out of range, the spec requires us to return
-  // the empty string.
-  __ LoadRoot(result, Heap::kempty_stringRootIndex);
-  __ b(&done);
-
-  __ bind(&need_conversion);
-  // Move smi zero into the result register, which will trigger
-  // conversion.
-  __ LoadSmiLiteral(result, Smi::FromInt(0));
-  __ b(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
-  __ bind(&done);
-  context()->Plug(result);
-}
-
-
 void FullCodeGenerator::EmitCall(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_LE(2, args->length());
@@ -3430,8 +3309,7 @@
   }
 
   // Convert old value into a number.
-  ToNumberStub convert_stub(isolate());
-  __ CallStub(&convert_stub);
+  __ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
   PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
 
   // Save result for postfix expressions.
diff --git a/src/full-codegen/s390/OWNERS b/src/full-codegen/s390/OWNERS
index eb007cb..752e8e3 100644
--- a/src/full-codegen/s390/OWNERS
+++ b/src/full-codegen/s390/OWNERS
@@ -3,3 +3,4 @@
 joransiu@ca.ibm.com
 mbrandy@us.ibm.com
 michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/src/full-codegen/s390/full-codegen-s390.cc b/src/full-codegen/s390/full-codegen-s390.cc
index 0d2107d..ee0b3e3 100644
--- a/src/full-codegen/s390/full-codegen-s390.cc
+++ b/src/full-codegen/s390/full-codegen-s390.cc
@@ -512,10 +512,12 @@
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
   codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
-  if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+  DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
+         !lit->IsUndetectable());
+  if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
+      lit->IsFalse(isolate())) {
     if (false_label_ != fall_through_) __ b(false_label_);
-  } else if (lit->IsTrue() || lit->IsJSObject()) {
+  } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
     if (true_label_ != fall_through_) __ b(true_label_);
   } else if (lit->IsString()) {
     if (String::cast(*lit)->length() == 0) {
@@ -739,21 +741,11 @@
 
     case VariableLocation::LOOKUP: {
       Comment cmnt(masm_, "[ VariableDeclaration");
+      DCHECK_EQ(VAR, mode);
+      DCHECK(!hole_init);
       __ mov(r4, Operand(variable->name()));
-      // Declaration nodes are always introduced in one of four modes.
-      DCHECK(IsDeclaredVariableMode(mode));
-      // Push initial value, if any.
-      // Note: For variables we must not push an initial value (such as
-      // 'undefined') because we may have a (legal) redeclaration and we
-      // must not destroy the current value.
-      if (hole_init) {
-        __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
-      } else {
-        __ LoadSmiLiteral(r2, Smi::FromInt(0));  // Indicates no initial value.
-      }
-      __ Push(r4, r2);
-      __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      __ CallRuntime(Runtime::kDeclareLookupSlot);
+      __ Push(r4);
+      __ CallRuntime(Runtime::kDeclareEvalVar);
       PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
@@ -804,8 +796,7 @@
       PushOperand(r4);
       // Push initial value for function declaration.
       VisitForStackValue(declaration->fun());
-      PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+      CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
       PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
@@ -1229,14 +1220,14 @@
 
 void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
                                                TypeofMode typeof_mode) {
+#ifdef DEBUG
   Variable* var = proxy->var();
   DCHECK(var->IsUnallocatedOrGlobalSlot() ||
          (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-  __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
-  __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
-  __ mov(LoadDescriptor::SlotRegister(),
+#endif
+  __ mov(LoadGlobalDescriptor::SlotRegister(),
          Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
-  CallLoadIC(typeof_mode);
+  CallLoadGlobalIC(typeof_mode);
 }
 
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
@@ -1303,17 +1294,6 @@
   }
 }
 
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
-  Comment cmnt(masm_, "[ RegExpLiteral");
-  __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ LoadSmiLiteral(r4, Smi::FromInt(expr->literal_index()));
-  __ mov(r3, Operand(expr->pattern()));
-  __ LoadSmiLiteral(r2, Smi::FromInt(expr->flags()));
-  FastCloneRegExpStub stub(isolate());
-  __ CallStub(&stub);
-  context()->Plug(r2);
-}
-
 void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
   Expression* expression = (property == NULL) ? NULL : property->value();
   if (expression == NULL) {
@@ -1421,12 +1401,16 @@
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->getter = property;
+          AccessorTable::Iterator it = accessor_table.lookup(key);
+          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->setter = property;
+          AccessorTable::Iterator it = accessor_table.lookup(key);
+          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->setter = property;
         }
         break;
     }
@@ -1444,6 +1428,7 @@
     __ LoadSmiLiteral(r2, Smi::FromInt(NONE));
     PushOperand(r2);
     CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+    PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1489,6 +1474,8 @@
             PushOperand(Smi::FromInt(NONE));
             PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
             CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+            PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+                                   BailoutState::NO_REGISTERS);
           } else {
             DropOperands(3);
           }
@@ -1764,7 +1751,7 @@
   // When we arrive here, r2 holds the generator object.
   __ RecordGeneratorContinuation();
   __ LoadP(r3, FieldMemOperand(r2, JSGeneratorObject::kResumeModeOffset));
-  __ LoadP(r2, FieldMemOperand(r2, JSGeneratorObject::kInputOffset));
+  __ LoadP(r2, FieldMemOperand(r2, JSGeneratorObject::kInputOrDebugPosOffset));
   STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
   STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
   __ CmpSmiLiteral(r3, Smi::FromInt(JSGeneratorObject::kReturn), r0);
@@ -2828,68 +2815,6 @@
   context()->Plug(r2);
 }
 
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(3, args->length());
-
-  Register string = r2;
-  Register index = r3;
-  Register value = r4;
-
-  VisitForStackValue(args->at(0));        // index
-  VisitForStackValue(args->at(1));        // value
-  VisitForAccumulatorValue(args->at(2));  // string
-  PopOperands(index, value);
-
-  if (FLAG_debug_code) {
-    __ TestIfSmi(value);
-    __ Check(eq, kNonSmiValue, cr0);
-    __ TestIfSmi(index);
-    __ Check(eq, kNonSmiIndex, cr0);
-    __ SmiUntag(index);
-    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
-    __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
-    __ SmiTag(index);
-  }
-
-  __ SmiUntag(value);
-  __ AddP(ip, string, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  __ SmiToByteArrayOffset(r1, index);
-  __ StoreByte(value, MemOperand(ip, r1));
-  context()->Plug(string);
-}
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(3, args->length());
-
-  Register string = r2;
-  Register index = r3;
-  Register value = r4;
-
-  VisitForStackValue(args->at(0));        // index
-  VisitForStackValue(args->at(1));        // value
-  VisitForAccumulatorValue(args->at(2));  // string
-  PopOperands(index, value);
-
-  if (FLAG_debug_code) {
-    __ TestIfSmi(value);
-    __ Check(eq, kNonSmiValue, cr0);
-    __ TestIfSmi(index);
-    __ Check(eq, kNonSmiIndex, cr0);
-    __ SmiUntag(index, index);
-    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
-    __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
-    __ SmiTag(index, index);
-  }
-
-  __ SmiUntag(value);
-  __ SmiToShortArrayOffset(r1, index);
-  __ StoreHalfWord(value, MemOperand(r1, string, SeqTwoByteString::kHeaderSize -
-                                                     kHeapObjectTag));
-  context()->Plug(string);
-}
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2923,8 +2848,7 @@
   Label index_out_of_range;
   Label done;
   StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
-                                      &need_conversion, &index_out_of_range,
-                                      STRING_INDEX_IS_NUMBER);
+                                      &need_conversion, &index_out_of_range);
   generator.GenerateFast(masm_);
   __ b(&done);
 
@@ -2947,47 +2871,6 @@
   context()->Plug(result);
 }
 
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-
-  Register object = r3;
-  Register index = r2;
-  Register scratch = r5;
-  Register result = r2;
-
-  PopOperand(object);
-
-  Label need_conversion;
-  Label index_out_of_range;
-  Label done;
-  StringCharAtGenerator generator(object, index, scratch, result,
-                                  &need_conversion, &need_conversion,
-                                  &index_out_of_range, STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm_);
-  __ b(&done);
-
-  __ bind(&index_out_of_range);
-  // When the index is out of range, the spec requires us to return
-  // the empty string.
-  __ LoadRoot(result, Heap::kempty_stringRootIndex);
-  __ b(&done);
-
-  __ bind(&need_conversion);
-  // Move smi zero into the result register, which will trigger
-  // conversion.
-  __ LoadSmiLiteral(result, Smi::FromInt(0));
-  __ b(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
-  __ bind(&done);
-  context()->Plug(result);
-}
-
 void FullCodeGenerator::EmitCall(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_LE(2, args->length());
@@ -3345,8 +3228,7 @@
   }
 
   // Convert old value into a number.
-  ToNumberStub convert_stub(isolate());
-  __ CallStub(&convert_stub);
+  __ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
   PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
 
   // Save result for postfix expressions.
diff --git a/src/full-codegen/x64/full-codegen-x64.cc b/src/full-codegen/x64/full-codegen-x64.cc
index 1ef9cee..eabb2f1 100644
--- a/src/full-codegen/x64/full-codegen-x64.cc
+++ b/src/full-codegen/x64/full-codegen-x64.cc
@@ -504,10 +504,12 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
-  if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+  DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
+         !lit->IsUndetectable());
+  if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
+      lit->IsFalse(isolate())) {
     if (false_label_ != fall_through_) __ jmp(false_label_);
-  } else if (lit->IsTrue() || lit->IsJSObject()) {
+  } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
     if (true_label_ != fall_through_) __ jmp(true_label_);
   } else if (lit->IsString()) {
     if (String::cast(*lit)->length() == 0) {
@@ -758,20 +760,10 @@
 
     case VariableLocation::LOOKUP: {
       Comment cmnt(masm_, "[ VariableDeclaration");
+      DCHECK_EQ(VAR, mode);
+      DCHECK(!hole_init);
       __ Push(variable->name());
-      // Declaration nodes are always introduced in one of four modes.
-      DCHECK(IsDeclaredVariableMode(mode));
-      // Push initial value, if any.
-      // Note: For variables we must not push an initial value (such as
-      // 'undefined') because we may have a (legal) redeclaration and we
-      // must not destroy the current value.
-      if (hole_init) {
-        __ PushRoot(Heap::kTheHoleValueRootIndex);
-      } else {
-        __ Push(Smi::FromInt(0));  // Indicates no initial value.
-      }
-      __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      __ CallRuntime(Runtime::kDeclareLookupSlot);
+      __ CallRuntime(Runtime::kDeclareEvalVar);
       PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
@@ -825,8 +817,7 @@
       Comment cmnt(masm_, "[ FunctionDeclaration");
       PushOperand(variable->name());
       VisitForStackValue(declaration->fun());
-      PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+      CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
       PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
@@ -1255,14 +1246,14 @@
 
 void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
                                                TypeofMode typeof_mode) {
+#ifdef DEBUG
   Variable* var = proxy->var();
   DCHECK(var->IsUnallocatedOrGlobalSlot() ||
          (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-  __ Move(LoadDescriptor::NameRegister(), var->name());
-  __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
-  __ Move(LoadDescriptor::SlotRegister(),
+#endif
+  __ Move(LoadGlobalDescriptor::SlotRegister(),
           SmiFromSlot(proxy->VariableFeedbackSlot()));
-  CallLoadIC(typeof_mode);
+  CallLoadGlobalIC(typeof_mode);
 }
 
 
@@ -1331,18 +1322,6 @@
 }
 
 
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
-  Comment cmnt(masm_, "[ RegExpLiteral");
-  __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-  __ Move(rax, Smi::FromInt(expr->literal_index()));
-  __ Move(rcx, expr->pattern());
-  __ Move(rdx, Smi::FromInt(expr->flags()));
-  FastCloneRegExpStub stub(isolate());
-  __ CallStub(&stub);
-  context()->Plug(rax);
-}
-
-
 void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
   Expression* expression = (property == NULL) ? NULL : property->value();
   if (expression == NULL) {
@@ -1449,12 +1428,16 @@
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->getter = property;
+          AccessorTable::Iterator it = accessor_table.lookup(key);
+          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->setter = property;
+          AccessorTable::Iterator it = accessor_table.lookup(key);
+          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->setter = property;
         }
         break;
     }
@@ -1471,6 +1454,7 @@
     EmitAccessor(it->second->setter);
     PushOperand(Smi::FromInt(NONE));
     CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+    PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1515,6 +1499,8 @@
             PushOperand(Smi::FromInt(NONE));
             PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
             CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+            PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+                                   BailoutState::NO_REGISTERS);
           } else {
             DropOperands(3);
           }
@@ -1789,7 +1775,7 @@
   // When we arrive here, rax holds the generator object.
   __ RecordGeneratorContinuation();
   __ movp(rbx, FieldOperand(rax, JSGeneratorObject::kResumeModeOffset));
-  __ movp(rax, FieldOperand(rax, JSGeneratorObject::kInputOffset));
+  __ movp(rax, FieldOperand(rax, JSGeneratorObject::kInputOrDebugPosOffset));
   STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
   STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
   __ SmiCompare(rbx, Smi::FromInt(JSGeneratorObject::kReturn));
@@ -2780,72 +2766,6 @@
 }
 
 
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(3, args->length());
-
-  Register string = rax;
-  Register index = rbx;
-  Register value = rcx;
-
-  VisitForStackValue(args->at(0));        // index
-  VisitForStackValue(args->at(1));        // value
-  VisitForAccumulatorValue(args->at(2));  // string
-  PopOperand(value);
-  PopOperand(index);
-
-  if (FLAG_debug_code) {
-    __ Check(__ CheckSmi(value), kNonSmiValue);
-    __ Check(__ CheckSmi(index), kNonSmiValue);
-  }
-
-  __ SmiToInteger32(value, value);
-  __ SmiToInteger32(index, index);
-
-  if (FLAG_debug_code) {
-    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
-    __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
-  }
-
-  __ movb(FieldOperand(string, index, times_1, SeqOneByteString::kHeaderSize),
-          value);
-  context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(3, args->length());
-
-  Register string = rax;
-  Register index = rbx;
-  Register value = rcx;
-
-  VisitForStackValue(args->at(0));        // index
-  VisitForStackValue(args->at(1));        // value
-  VisitForAccumulatorValue(args->at(2));  // string
-  PopOperand(value);
-  PopOperand(index);
-
-  if (FLAG_debug_code) {
-    __ Check(__ CheckSmi(value), kNonSmiValue);
-    __ Check(__ CheckSmi(index), kNonSmiValue);
-  }
-
-  __ SmiToInteger32(value, value);
-  __ SmiToInteger32(index, index);
-
-  if (FLAG_debug_code) {
-    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
-    __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
-  }
-
-  __ movw(FieldOperand(string, index, times_2, SeqTwoByteString::kHeaderSize),
-          value);
-  context()->Plug(rax);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2881,13 +2801,8 @@
   Label need_conversion;
   Label index_out_of_range;
   Label done;
-  StringCharCodeAtGenerator generator(object,
-                                      index,
-                                      result,
-                                      &need_conversion,
-                                      &need_conversion,
-                                      &index_out_of_range,
-                                      STRING_INDEX_IS_NUMBER);
+  StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
+                                      &need_conversion, &index_out_of_range);
   generator.GenerateFast(masm_);
   __ jmp(&done);
 
@@ -2911,54 +2826,6 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-
-  Register object = rbx;
-  Register index = rax;
-  Register scratch = rdx;
-  Register result = rax;
-
-  PopOperand(object);
-
-  Label need_conversion;
-  Label index_out_of_range;
-  Label done;
-  StringCharAtGenerator generator(object,
-                                  index,
-                                  scratch,
-                                  result,
-                                  &need_conversion,
-                                  &need_conversion,
-                                  &index_out_of_range,
-                                  STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm_);
-  __ jmp(&done);
-
-  __ bind(&index_out_of_range);
-  // When the index is out of range, the spec requires us to return
-  // the empty string.
-  __ LoadRoot(result, Heap::kempty_stringRootIndex);
-  __ jmp(&done);
-
-  __ bind(&need_conversion);
-  // Move smi zero into the result register, which will trigger
-  // conversion.
-  __ Move(result, Smi::FromInt(0));
-  __ jmp(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
-  __ bind(&done);
-  context()->Plug(result);
-}
-
-
 void FullCodeGenerator::EmitCall(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_LE(2, args->length());
@@ -3335,8 +3202,7 @@
   }
 
   // Convert old value into a number.
-  ToNumberStub convert_stub(isolate());
-  __ CallStub(&convert_stub);
+  __ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
   PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
 
   // Save result for postfix expressions.
diff --git a/src/full-codegen/x87/full-codegen-x87.cc b/src/full-codegen/x87/full-codegen-x87.cc
index d7403fa..2fb9961 100644
--- a/src/full-codegen/x87/full-codegen-x87.cc
+++ b/src/full-codegen/x87/full-codegen-x87.cc
@@ -487,10 +487,12 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
-  if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+  DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
+         !lit->IsUndetectable());
+  if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
+      lit->IsFalse(isolate())) {
     if (false_label_ != fall_through_) __ jmp(false_label_);
-  } else if (lit->IsTrue() || lit->IsJSObject()) {
+  } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
     if (true_label_ != fall_through_) __ jmp(true_label_);
   } else if (lit->IsString()) {
     if (String::cast(*lit)->length() == 0) {
@@ -743,21 +745,10 @@
 
     case VariableLocation::LOOKUP: {
       Comment cmnt(masm_, "[ VariableDeclaration");
+      DCHECK_EQ(VAR, mode);
+      DCHECK(!hole_init);
       __ push(Immediate(variable->name()));
-      // VariableDeclaration nodes are always introduced in one of four modes.
-      DCHECK(IsDeclaredVariableMode(mode));
-      // Push initial value, if any.
-      // Note: For variables we must not push an initial value (such as
-      // 'undefined') because we may have a (legal) redeclaration and we
-      // must not destroy the current value.
-      if (hole_init) {
-        __ push(Immediate(isolate()->factory()->the_hole_value()));
-      } else {
-        __ push(Immediate(Smi::FromInt(0)));  // Indicates no initial value.
-      }
-      __ push(
-          Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
-      __ CallRuntime(Runtime::kDeclareLookupSlot);
+      __ CallRuntime(Runtime::kDeclareEvalVar);
       PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
@@ -805,8 +796,7 @@
       Comment cmnt(masm_, "[ FunctionDeclaration");
       PushOperand(variable->name());
       VisitForStackValue(declaration->fun());
-      PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+      CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
       PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
@@ -1218,17 +1208,14 @@
 
 void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
                                                TypeofMode typeof_mode) {
+#ifdef DEBUG
   Variable* var = proxy->var();
   DCHECK(var->IsUnallocatedOrGlobalSlot() ||
          (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-  __ mov(LoadDescriptor::ReceiverRegister(), NativeContextOperand());
-  __ mov(LoadDescriptor::ReceiverRegister(),
-         ContextOperand(LoadDescriptor::ReceiverRegister(),
-                        Context::EXTENSION_INDEX));
-  __ mov(LoadDescriptor::NameRegister(), var->name());
-  __ mov(LoadDescriptor::SlotRegister(),
+#endif
+  __ mov(LoadGlobalDescriptor::SlotRegister(),
          Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
-  CallLoadIC(typeof_mode);
+  CallLoadGlobalIC(typeof_mode);
 }
 
 
@@ -1297,18 +1284,6 @@
 }
 
 
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
-  Comment cmnt(masm_, "[ RegExpLiteral");
-  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  __ Move(eax, Immediate(Smi::FromInt(expr->literal_index())));
-  __ Move(ecx, Immediate(expr->pattern()));
-  __ Move(edx, Immediate(Smi::FromInt(expr->flags())));
-  FastCloneRegExpStub stub(isolate());
-  __ CallStub(&stub);
-  context()->Plug(eax);
-}
-
-
 void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
   Expression* expression = (property == NULL) ? NULL : property->value();
   if (expression == NULL) {
@@ -1415,12 +1390,16 @@
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->getter = property;
+          AccessorTable::Iterator it = accessor_table.lookup(key);
+          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
-          accessor_table.lookup(key)->second->setter = property;
+          AccessorTable::Iterator it = accessor_table.lookup(key);
+          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->setter = property;
         }
         break;
     }
@@ -1439,6 +1418,7 @@
 
     PushOperand(Smi::FromInt(NONE));
     CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+    PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1483,6 +1463,8 @@
             PushOperand(Smi::FromInt(NONE));
             PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
             CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+            PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+                                   BailoutState::NO_REGISTERS);
           } else {
             DropOperands(3);
           }
@@ -1758,7 +1740,7 @@
   // When we arrive here, eax holds the generator object.
   __ RecordGeneratorContinuation();
   __ mov(ebx, FieldOperand(eax, JSGeneratorObject::kResumeModeOffset));
-  __ mov(eax, FieldOperand(eax, JSGeneratorObject::kInputOffset));
+  __ mov(eax, FieldOperand(eax, JSGeneratorObject::kInputOrDebugPosOffset));
   STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
   STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
   __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::kReturn)));
@@ -2783,75 +2765,6 @@
 }
 
 
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(3, args->length());
-
-  Register string = eax;
-  Register index = ebx;
-  Register value = ecx;
-
-  VisitForStackValue(args->at(0));        // index
-  VisitForStackValue(args->at(1));        // value
-  VisitForAccumulatorValue(args->at(2));  // string
-
-  PopOperand(value);
-  PopOperand(index);
-
-  if (FLAG_debug_code) {
-    __ test(value, Immediate(kSmiTagMask));
-    __ Check(zero, kNonSmiValue);
-    __ test(index, Immediate(kSmiTagMask));
-    __ Check(zero, kNonSmiValue);
-  }
-
-  __ SmiUntag(value);
-  __ SmiUntag(index);
-
-  if (FLAG_debug_code) {
-    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
-    __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
-  }
-
-  __ mov_b(FieldOperand(string, index, times_1, SeqOneByteString::kHeaderSize),
-           value);
-  context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(3, args->length());
-
-  Register string = eax;
-  Register index = ebx;
-  Register value = ecx;
-
-  VisitForStackValue(args->at(0));        // index
-  VisitForStackValue(args->at(1));        // value
-  VisitForAccumulatorValue(args->at(2));  // string
-  PopOperand(value);
-  PopOperand(index);
-
-  if (FLAG_debug_code) {
-    __ test(value, Immediate(kSmiTagMask));
-    __ Check(zero, kNonSmiValue);
-    __ test(index, Immediate(kSmiTagMask));
-    __ Check(zero, kNonSmiValue);
-    __ SmiUntag(index);
-    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
-    __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
-    __ SmiTag(index);
-  }
-
-  __ SmiUntag(value);
-  // No need to untag a smi for two-byte addressing.
-  __ mov_w(FieldOperand(string, index, times_1, SeqTwoByteString::kHeaderSize),
-           value);
-  context()->Plug(string);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2887,13 +2800,8 @@
   Label need_conversion;
   Label index_out_of_range;
   Label done;
-  StringCharCodeAtGenerator generator(object,
-                                      index,
-                                      result,
-                                      &need_conversion,
-                                      &need_conversion,
-                                      &index_out_of_range,
-                                      STRING_INDEX_IS_NUMBER);
+  StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
+                                      &need_conversion, &index_out_of_range);
   generator.GenerateFast(masm_);
   __ jmp(&done);
 
@@ -2917,54 +2825,6 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-
-  Register object = ebx;
-  Register index = eax;
-  Register scratch = edx;
-  Register result = eax;
-
-  PopOperand(object);
-
-  Label need_conversion;
-  Label index_out_of_range;
-  Label done;
-  StringCharAtGenerator generator(object,
-                                  index,
-                                  scratch,
-                                  result,
-                                  &need_conversion,
-                                  &need_conversion,
-                                  &index_out_of_range,
-                                  STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm_);
-  __ jmp(&done);
-
-  __ bind(&index_out_of_range);
-  // When the index is out of range, the spec requires us to return
-  // the empty string.
-  __ Move(result, Immediate(isolate()->factory()->empty_string()));
-  __ jmp(&done);
-
-  __ bind(&need_conversion);
-  // Move smi zero into the result register, which will trigger
-  // conversion.
-  __ Move(result, Immediate(Smi::FromInt(0)));
-  __ jmp(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
-  __ bind(&done);
-  context()->Plug(result);
-}
-
-
 void FullCodeGenerator::EmitCall(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_LE(2, args->length());
@@ -3343,8 +3203,7 @@
   }
 
   // Convert old value into a number.
-  ToNumberStub convert_stub(isolate());
-  __ CallStub(&convert_stub);
+  __ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
   PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
 
   // Save result for postfix expressions.
diff --git a/src/futex-emulation.cc b/src/futex-emulation.cc
index 991e4c3..1f7e8a6 100644
--- a/src/futex-emulation.cc
+++ b/src/futex-emulation.cc
@@ -142,7 +142,7 @@
     // be false, so we'll loop and then check interrupts.
     if (interrupted) {
       Object* interrupt_object = isolate->stack_guard()->HandleInterrupts();
-      if (interrupt_object->IsException()) {
+      if (interrupt_object->IsException(isolate)) {
         result = interrupt_object;
         mutex_.Pointer()->Lock();
         break;
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index 0df5975..e73b733 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -2012,17 +2012,19 @@
   return static_cast<uint32_t>((offset >> kCodeAlignmentBits) * kGoldenRatio);
 }
 
-
-static HashMap* GetLineMap() {
-  static HashMap* line_map = NULL;
-  if (line_map == NULL) line_map = new HashMap(&HashMap::PointersMatch);
+static base::HashMap* GetLineMap() {
+  static base::HashMap* line_map = NULL;
+  if (line_map == NULL) {
+    line_map = new base::HashMap(&base::HashMap::PointersMatch);
+  }
   return line_map;
 }
 
 
 static void PutLineInfo(Address addr, LineInfo* info) {
-  HashMap* line_map = GetLineMap();
-  HashMap::Entry* e = line_map->LookupOrInsert(addr, HashCodeAddress(addr));
+  base::HashMap* line_map = GetLineMap();
+  base::HashMap::Entry* e =
+      line_map->LookupOrInsert(addr, HashCodeAddress(addr));
   if (e->value != NULL) delete static_cast<LineInfo*>(e->value);
   e->value = info;
 }
diff --git a/src/global-handles.cc b/src/global-handles.cc
index 82b4fcd..dffa237 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -879,7 +879,7 @@
     PrintInternalFields(js_object);
     PrintF(" ] }\n");
   } else {
-    PrintF("object of unexpected type: %p\n", object);
+    PrintF("object of unexpected type: %p\n", static_cast<void*>(object));
   }
 }
 
@@ -891,7 +891,7 @@
     if (name->length() == 0) name = constructor->shared()->inferred_name();
 
     PrintF("%s", name->ToCString().get());
-  } else if (maybe_constructor->IsNull()) {
+  } else if (maybe_constructor->IsNull(isolate_)) {
     if (js_object->IsOddball()) {
       PrintF("<oddball>");
     } else {
@@ -907,7 +907,7 @@
     if (i != 0) {
       PrintF(", ");
     }
-    PrintF("%p", js_object->GetInternalField(i));
+    PrintF("%p", static_cast<void*>(js_object->GetInternalField(i)));
   }
 }
 
diff --git a/src/globals.h b/src/globals.h
index ed297e7..a31f237 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -150,12 +150,21 @@
 // encoded immediate, the addresses have to be in range of 256MB aligned
 // region. Used only for large object space.
 const size_t kMaximalCodeRangeSize = 256 * MB;
+const size_t kCodeRangeAreaAlignment = 256 * MB;
+#elif V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
+const size_t kMaximalCodeRangeSize = 512 * MB;
+const size_t kCodeRangeAreaAlignment = 64 * KB;  // OS page on PPC Linux
 #else
 const size_t kMaximalCodeRangeSize = 512 * MB;
+const size_t kCodeRangeAreaAlignment = 4 * KB;  // OS page.
 #endif
 #if V8_OS_WIN
 const size_t kMinimumCodeRangeSize = 4 * MB;
 const size_t kReservedCodeRangePages = 1;
+// On PPC Linux PageSize is 4MB
+#elif V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
+const size_t kMinimumCodeRangeSize = 12 * MB;
+const size_t kReservedCodeRangePages = 0;
 #else
 const size_t kMinimumCodeRangeSize = 3 * MB;
 const size_t kReservedCodeRangePages = 0;
@@ -169,14 +178,24 @@
 const bool kRequiresCodeRange = true;
 const size_t kMaximalCodeRangeSize = 256 * MB;
 const size_t kMinimumCodeRangeSize = 3 * MB;
-const size_t kReservedCodeRangePages = 0;
+const size_t kCodeRangeAreaAlignment = 4 * KB;  // OS page.
+#elif V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
+const bool kRequiresCodeRange = false;
+const size_t kMaximalCodeRangeSize = 0 * MB;
+const size_t kMinimumCodeRangeSize = 0 * MB;
+const size_t kCodeRangeAreaAlignment = 64 * KB;  // OS page on PPC Linux
 #else
 const bool kRequiresCodeRange = false;
 const size_t kMaximalCodeRangeSize = 0 * MB;
 const size_t kMinimumCodeRangeSize = 0 * MB;
+const size_t kCodeRangeAreaAlignment = 4 * KB;  // OS page.
+#endif
 const size_t kReservedCodeRangePages = 0;
 #endif
-#endif
+
+// The external allocation limit should be below 256 MB on all architectures
+// to avoid that resource-constrained embedders run low on memory.
+const int kExternalAllocationLimit = 192 * 1024 * 1024;
 
 STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
 
@@ -452,6 +471,26 @@
   kSimd128Unaligned
 };
 
+// Possible outcomes for decisions.
+enum class Decision : uint8_t { kUnknown, kTrue, kFalse };
+
+inline size_t hash_value(Decision decision) {
+  return static_cast<uint8_t>(decision);
+}
+
+inline std::ostream& operator<<(std::ostream& os, Decision decision) {
+  switch (decision) {
+    case Decision::kUnknown:
+      return os << "Unknown";
+    case Decision::kTrue:
+      return os << "True";
+    case Decision::kFalse:
+      return os << "False";
+  }
+  UNREACHABLE();
+  return os;
+}
+
 // Supported write barrier modes.
 enum WriteBarrierKind : uint8_t {
   kNoWriteBarrier,
@@ -549,6 +588,8 @@
   int instr_size;
   int reloc_size;
   int constant_pool_size;
+  byte* unwinding_info;
+  int unwinding_info_size;
   Assembler* origin;
 };
 
@@ -580,8 +621,6 @@
   MEGAMORPHIC,
   // A generic handler is installed and no extra typefeedback is recorded.
   GENERIC,
-  // Special state for debug break or step in prepare stubs.
-  DEBUG_STUB
 };
 
 enum CacheHolderFlag {
@@ -591,6 +630,7 @@
   kCacheOnReceiver
 };
 
+enum WhereToStart { kStartAtReceiver, kStartAtPrototype };
 
 // The Store Buffer (GC).
 typedef enum {
@@ -640,6 +680,15 @@
   } bits;
 };
 
+#if V8_TARGET_LITTLE_ENDIAN
+typedef IeeeDoubleLittleEndianArchType IeeeDoubleArchType;
+const int kIeeeDoubleMantissaWordOffset = 0;
+const int kIeeeDoubleExponentWordOffset = 4;
+#else
+typedef IeeeDoubleBigEndianArchType IeeeDoubleArchType;
+const int kIeeeDoubleMantissaWordOffset = 4;
+const int kIeeeDoubleExponentWordOffset = 0;
+#endif
 
 // AccessorCallback
 struct AccessorDescriptor {
@@ -693,7 +742,6 @@
   ARMv7,
   ARMv8,
   SUDIV,
-  MLS,
   UNALIGNED_ACCESSES,
   MOVW_MOVT_IMMEDIATE_LOADS,
   VFP32DREGS,
@@ -800,8 +848,14 @@
 };
 
 // The mips architecture prior to revision 5 has inverted encoding for sNaN.
-#if (V8_TARGET_ARCH_MIPS && !defined(_MIPS_ARCH_MIPS32R6)) || \
-    (V8_TARGET_ARCH_MIPS64 && !defined(_MIPS_ARCH_MIPS64R6))
+// The x87 FPU convert the sNaN to qNaN automatically when loading sNaN from
+// memmory.
+// Use mips sNaN which is a not used qNaN in x87 port as sNaN to workaround this
+// issue
+// for some test cases.
+#if (V8_TARGET_ARCH_MIPS && !defined(_MIPS_ARCH_MIPS32R6)) ||   \
+    (V8_TARGET_ARCH_MIPS64 && !defined(_MIPS_ARCH_MIPS64R6)) || \
+    (V8_TARGET_ARCH_X87)
 const uint32_t kHoleNanUpper32 = 0xFFFF7FFF;
 const uint32_t kHoleNanLower32 = 0xFFFF7FFF;
 #else
@@ -1007,6 +1061,10 @@
   return kind & FunctionKind::kAsyncFunction;
 }
 
+inline bool IsResumableFunction(FunctionKind kind) {
+  return IsGeneratorFunction(kind) || IsAsyncFunction(kind);
+}
+
 inline bool IsConciseMethod(FunctionKind kind) {
   DCHECK(IsValidFunctionKind(kind));
   return kind & FunctionKind::kConciseMethod;
diff --git a/src/heap-symbols.h b/src/heap-symbols.h
index 529342a..03b25ee 100644
--- a/src/heap-symbols.h
+++ b/src/heap-symbols.h
@@ -12,6 +12,16 @@
   V(arguments_string, "arguments")                                 \
   V(Arguments_string, "Arguments")                                 \
   V(Array_string, "Array")                                         \
+  V(arguments_to_string, "[object Arguments]")                     \
+  V(array_to_string, "[object Array]")                             \
+  V(boolean_to_string, "[object Boolean]")                         \
+  V(date_to_string, "[object Date]")                               \
+  V(error_to_string, "[object Error]")                             \
+  V(function_to_string, "[object Function]")                       \
+  V(number_to_string, "[object Number]")                           \
+  V(object_to_string, "[object Object]")                           \
+  V(regexp_to_string, "[object RegExp]")                           \
+  V(string_to_string, "[object String]")                           \
   V(bind_string, "bind")                                           \
   V(bool16x8_string, "bool16x8")                                   \
   V(Bool16x8_string, "Bool16x8")                                   \
@@ -22,6 +32,7 @@
   V(boolean_string, "boolean")                                     \
   V(Boolean_string, "Boolean")                                     \
   V(bound__string, "bound ")                                       \
+  V(buffer_string, "buffer")                                       \
   V(byte_length_string, "byteLength")                              \
   V(byte_offset_string, "byteOffset")                              \
   V(call_string, "call")                                           \
@@ -30,6 +41,7 @@
   V(cell_value_string, "%cell_value")                              \
   V(char_at_string, "CharAt")                                      \
   V(closure_string, "(closure)")                                   \
+  V(column_string, "column")                                       \
   V(compare_ic_string, "==")                                       \
   V(configurable_string, "configurable")                           \
   V(constructor_string, "constructor")                             \
@@ -78,6 +90,7 @@
   V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic")         \
   V(last_index_string, "lastIndex")                                \
   V(length_string, "length")                                       \
+  V(line_string, "line")                                           \
   V(Map_string, "Map")                                             \
   V(minus_infinity_string, "-Infinity")                            \
   V(minus_zero_string, "-0")                                       \
@@ -91,6 +104,7 @@
   V(object_string, "object")                                       \
   V(Object_string, "Object")                                       \
   V(ownKeys_string, "ownKeys")                                     \
+  V(position_string, "position")                                   \
   V(preventExtensions_string, "preventExtensions")                 \
   V(private_api_string, "private_api")                             \
   V(Promise_string, "Promise")                                     \
@@ -99,11 +113,13 @@
   V(Proxy_string, "Proxy")                                         \
   V(query_colon_string, "(?:)")                                    \
   V(RegExp_string, "RegExp")                                       \
+  V(script_string, "script")                                       \
   V(setPrototypeOf_string, "setPrototypeOf")                       \
   V(set_string, "set")                                             \
   V(Set_string, "Set")                                             \
   V(source_mapping_url_string, "source_mapping_url")               \
   V(source_string, "source")                                       \
+  V(sourceText_string, "sourceText")                               \
   V(source_url_string, "source_url")                               \
   V(stack_string, "stack")                                         \
   V(strict_compare_ic_string, "===")                               \
@@ -166,12 +182,13 @@
   V(premonomorphic_symbol)                  \
   V(promise_combined_deferred_symbol)       \
   V(promise_debug_marker_symbol)            \
-  V(promise_has_handler_symbol)             \
+  V(promise_deferred_reactions_symbol)      \
   V(promise_fulfill_reactions_symbol)       \
-  V(promise_reject_reactions_symbol)        \
+  V(promise_has_handler_symbol)             \
   V(promise_raw_symbol)                     \
-  V(promise_state_symbol)                   \
+  V(promise_reject_reactions_symbol)        \
   V(promise_result_symbol)                  \
+  V(promise_state_symbol)                   \
   V(sealed_symbol)                          \
   V(stack_trace_symbol)                     \
   V(strict_function_transition_symbol)      \
diff --git a/src/heap/array-buffer-tracker-inl.h b/src/heap/array-buffer-tracker-inl.h
new file mode 100644
index 0000000..a176744
--- /dev/null
+++ b/src/heap/array-buffer-tracker-inl.h
@@ -0,0 +1,67 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/conversions-inl.h"
+#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/heap.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer* buffer) {
+  void* data = buffer->backing_store();
+  if (!data) return;
+
+  size_t length = NumberToSize(heap->isolate(), buffer->byte_length());
+  Page* page = Page::FromAddress(buffer->address());
+  {
+    base::LockGuard<base::Mutex> guard(page->mutex());
+    LocalArrayBufferTracker* tracker = page->local_tracker();
+    if (tracker == nullptr) {
+      page->AllocateLocalTracker();
+      tracker = page->local_tracker();
+    }
+    DCHECK_NOT_NULL(tracker);
+    tracker->Add(buffer, std::make_pair(data, length));
+  }
+  // We may go over the limit of externally allocated memory here. We call the
+  // api function to trigger a GC in this case.
+  reinterpret_cast<v8::Isolate*>(heap->isolate())
+      ->AdjustAmountOfExternalAllocatedMemory(length);
+}
+
+void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer* buffer) {
+  void* data = buffer->backing_store();
+  if (!data) return;
+
+  Page* page = Page::FromAddress(buffer->address());
+  size_t length = 0;
+  {
+    base::LockGuard<base::Mutex> guard(page->mutex());
+    LocalArrayBufferTracker* tracker = page->local_tracker();
+    DCHECK_NOT_NULL(tracker);
+    length = tracker->Remove(buffer).second;
+  }
+  heap->update_external_memory(-static_cast<intptr_t>(length));
+}
+
+void LocalArrayBufferTracker::Add(Key key, const Value& value) {
+  auto ret = array_buffers_.insert(std::make_pair(key, value));
+  USE(ret);
+  // Check that we indeed inserted a new value and did not overwrite an existing
+  // one (which would be a bug).
+  DCHECK(ret.second);
+}
+
+LocalArrayBufferTracker::Value LocalArrayBufferTracker::Remove(Key key) {
+  TrackingMap::iterator it = array_buffers_.find(key);
+  DCHECK(it != array_buffers_.end());
+  Value value = it->second;
+  array_buffers_.erase(it);
+  return value;
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/heap/array-buffer-tracker.cc b/src/heap/array-buffer-tracker.cc
index 6e389c1..a870b35 100644
--- a/src/heap/array-buffer-tracker.cc
+++ b/src/heap/array-buffer-tracker.cc
@@ -3,138 +3,133 @@
 // found in the LICENSE file.
 
 #include "src/heap/array-buffer-tracker.h"
+#include "src/heap/array-buffer-tracker-inl.h"
 #include "src/heap/heap.h"
-#include "src/isolate.h"
-#include "src/objects.h"
-#include "src/objects-inl.h"
-#include "src/v8.h"
 
 namespace v8 {
 namespace internal {
 
-ArrayBufferTracker::~ArrayBufferTracker() {
-  Isolate* isolate = heap()->isolate();
+LocalArrayBufferTracker::~LocalArrayBufferTracker() {
+  CHECK(array_buffers_.empty());
+}
+
+template <LocalArrayBufferTracker::FreeMode free_mode>
+void LocalArrayBufferTracker::Free() {
   size_t freed_memory = 0;
-  for (auto& buffer : live_array_buffers_) {
-    isolate->array_buffer_allocator()->Free(buffer.first, buffer.second);
-    freed_memory += buffer.second;
-  }
-  for (auto& buffer : live_array_buffers_for_scavenge_) {
-    isolate->array_buffer_allocator()->Free(buffer.first, buffer.second);
-    freed_memory += buffer.second;
-  }
-  live_array_buffers_.clear();
-  live_array_buffers_for_scavenge_.clear();
-  not_yet_discovered_array_buffers_.clear();
-  not_yet_discovered_array_buffers_for_scavenge_.clear();
-
-  if (freed_memory > 0) {
-    heap()->update_amount_of_external_allocated_memory(
-        -static_cast<int64_t>(freed_memory));
-  }
-}
-
-
-void ArrayBufferTracker::RegisterNew(JSArrayBuffer* buffer) {
-  void* data = buffer->backing_store();
-  if (!data) return;
-
-  bool in_new_space = heap()->InNewSpace(buffer);
-  size_t length = NumberToSize(heap()->isolate(), buffer->byte_length());
-  if (in_new_space) {
-    live_array_buffers_for_scavenge_[data] = length;
-  } else {
-    live_array_buffers_[data] = length;
-  }
-
-  // We may go over the limit of externally allocated memory here. We call the
-  // api function to trigger a GC in this case.
-  reinterpret_cast<v8::Isolate*>(heap()->isolate())
-      ->AdjustAmountOfExternalAllocatedMemory(length);
-}
-
-
-void ArrayBufferTracker::Unregister(JSArrayBuffer* buffer) {
-  void* data = buffer->backing_store();
-  if (!data) return;
-
-  bool in_new_space = heap()->InNewSpace(buffer);
-  std::map<void*, size_t>* live_buffers =
-      in_new_space ? &live_array_buffers_for_scavenge_ : &live_array_buffers_;
-  std::map<void*, size_t>* not_yet_discovered_buffers =
-      in_new_space ? &not_yet_discovered_array_buffers_for_scavenge_
-                   : &not_yet_discovered_array_buffers_;
-
-  DCHECK(live_buffers->count(data) > 0);
-
-  size_t length = (*live_buffers)[data];
-  live_buffers->erase(data);
-  not_yet_discovered_buffers->erase(data);
-
-  heap()->update_amount_of_external_allocated_memory(
-      -static_cast<int64_t>(length));
-}
-
-
-void ArrayBufferTracker::MarkLive(JSArrayBuffer* buffer) {
-  base::LockGuard<base::Mutex> guard(&mutex_);
-  void* data = buffer->backing_store();
-
-  // ArrayBuffer might be in the middle of being constructed.
-  if (data == heap()->undefined_value()) return;
-  if (heap()->InNewSpace(buffer)) {
-    not_yet_discovered_array_buffers_for_scavenge_.erase(data);
-  } else {
-    not_yet_discovered_array_buffers_.erase(data);
-  }
-}
-
-
-void ArrayBufferTracker::FreeDead(bool from_scavenge) {
-  size_t freed_memory = 0;
-  Isolate* isolate = heap()->isolate();
-  for (auto& buffer : not_yet_discovered_array_buffers_for_scavenge_) {
-    isolate->array_buffer_allocator()->Free(buffer.first, buffer.second);
-    freed_memory += buffer.second;
-    live_array_buffers_for_scavenge_.erase(buffer.first);
-  }
-
-  if (!from_scavenge) {
-    for (auto& buffer : not_yet_discovered_array_buffers_) {
-      isolate->array_buffer_allocator()->Free(buffer.first, buffer.second);
-      freed_memory += buffer.second;
-      live_array_buffers_.erase(buffer.first);
+  for (TrackingMap::iterator it = array_buffers_.begin();
+       it != array_buffers_.end();) {
+    if ((free_mode == kFreeAll) ||
+        Marking::IsWhite(Marking::MarkBitFrom(it->first))) {
+      heap_->isolate()->array_buffer_allocator()->Free(it->second.first,
+                                                       it->second.second);
+      freed_memory += it->second.second;
+      it = array_buffers_.erase(it);
+    } else {
+      it++;
     }
   }
-
-  not_yet_discovered_array_buffers_for_scavenge_ =
-      live_array_buffers_for_scavenge_;
-  if (!from_scavenge) not_yet_discovered_array_buffers_ = live_array_buffers_;
-
-  // Do not call through the api as this code is triggered while doing a GC.
-  heap()->update_amount_of_external_allocated_memory(
-      -static_cast<int64_t>(freed_memory));
+  if (freed_memory > 0) {
+    heap_->update_external_memory_concurrently_freed(
+        static_cast<intptr_t>(freed_memory));
+  }
 }
 
-
-void ArrayBufferTracker::PrepareDiscoveryInNewSpace() {
-  not_yet_discovered_array_buffers_for_scavenge_ =
-      live_array_buffers_for_scavenge_;
+template <typename Callback>
+void LocalArrayBufferTracker::Process(Callback callback) {
+  JSArrayBuffer* new_buffer = nullptr;
+  size_t freed_memory = 0;
+  for (TrackingMap::iterator it = array_buffers_.begin();
+       it != array_buffers_.end();) {
+    const CallbackResult result = callback(it->first, &new_buffer);
+    if (result == kKeepEntry) {
+      it++;
+    } else if (result == kUpdateEntry) {
+      DCHECK_NOT_NULL(new_buffer);
+      Page* target_page = Page::FromAddress(new_buffer->address());
+      // We need to lock the target page because we cannot guarantee
+      // exclusive access to new space pages.
+      if (target_page->InNewSpace()) target_page->mutex()->Lock();
+      LocalArrayBufferTracker* tracker = target_page->local_tracker();
+      if (tracker == nullptr) {
+        target_page->AllocateLocalTracker();
+        tracker = target_page->local_tracker();
+      }
+      DCHECK_NOT_NULL(tracker);
+      tracker->Add(new_buffer, it->second);
+      if (target_page->InNewSpace()) target_page->mutex()->Unlock();
+      it = array_buffers_.erase(it);
+    } else if (result == kRemoveEntry) {
+      heap_->isolate()->array_buffer_allocator()->Free(it->second.first,
+                                                       it->second.second);
+      freed_memory += it->second.second;
+      it = array_buffers_.erase(it);
+    } else {
+      UNREACHABLE();
+    }
+  }
+  if (freed_memory > 0) {
+    heap_->update_external_memory_concurrently_freed(
+        static_cast<intptr_t>(freed_memory));
+  }
 }
 
+void ArrayBufferTracker::FreeDeadInNewSpace(Heap* heap) {
+  DCHECK_EQ(heap->gc_state(), Heap::HeapState::SCAVENGE);
+  for (Page* page : NewSpacePageRange(heap->new_space()->FromSpaceStart(),
+                                      heap->new_space()->FromSpaceEnd())) {
+    bool empty = ProcessBuffers(page, kUpdateForwardedRemoveOthers);
+    CHECK(empty);
+  }
+  heap->account_external_memory_concurrently_freed();
+}
 
-void ArrayBufferTracker::Promote(JSArrayBuffer* buffer) {
-  base::LockGuard<base::Mutex> guard(&mutex_);
+void ArrayBufferTracker::FreeDead(Page* page) {
+  // Callers need to ensure having the page lock.
+  LocalArrayBufferTracker* tracker = page->local_tracker();
+  if (tracker == nullptr) return;
+  DCHECK(!page->SweepingDone());
+  tracker->Free<LocalArrayBufferTracker::kFreeDead>();
+  if (tracker->IsEmpty()) {
+    page->ReleaseLocalTracker();
+  }
+}
 
-  if (buffer->is_external()) return;
-  void* data = buffer->backing_store();
-  if (!data) return;
-  // ArrayBuffer might be in the middle of being constructed.
-  if (data == heap()->undefined_value()) return;
-  DCHECK(live_array_buffers_for_scavenge_.count(data) > 0);
-  live_array_buffers_[data] = live_array_buffers_for_scavenge_[data];
-  live_array_buffers_for_scavenge_.erase(data);
-  not_yet_discovered_array_buffers_for_scavenge_.erase(data);
+void ArrayBufferTracker::FreeAll(Page* page) {
+  LocalArrayBufferTracker* tracker = page->local_tracker();
+  if (tracker == nullptr) return;
+  tracker->Free<LocalArrayBufferTracker::kFreeAll>();
+  if (tracker->IsEmpty()) {
+    page->ReleaseLocalTracker();
+  }
+}
+
+bool ArrayBufferTracker::ProcessBuffers(Page* page, ProcessingMode mode) {
+  LocalArrayBufferTracker* tracker = page->local_tracker();
+  if (tracker == nullptr) return true;
+
+  DCHECK(page->SweepingDone());
+  tracker->Process(
+      [mode](JSArrayBuffer* old_buffer, JSArrayBuffer** new_buffer) {
+        MapWord map_word = old_buffer->map_word();
+        if (map_word.IsForwardingAddress()) {
+          *new_buffer = JSArrayBuffer::cast(map_word.ToForwardingAddress());
+          return LocalArrayBufferTracker::kUpdateEntry;
+        }
+        return mode == kUpdateForwardedKeepOthers
+                   ? LocalArrayBufferTracker::kKeepEntry
+                   : LocalArrayBufferTracker::kRemoveEntry;
+      });
+  return tracker->IsEmpty();
+}
+
+bool ArrayBufferTracker::IsTracked(JSArrayBuffer* buffer) {
+  Page* page = Page::FromAddress(buffer->address());
+  {
+    base::LockGuard<base::Mutex> guard(page->mutex());
+    LocalArrayBufferTracker* tracker = page->local_tracker();
+    if (tracker == nullptr) return false;
+    return tracker->IsTracked(buffer);
+  }
 }
 
 }  // namespace internal
diff --git a/src/heap/array-buffer-tracker.h b/src/heap/array-buffer-tracker.h
index 6130003..b015aa0 100644
--- a/src/heap/array-buffer-tracker.h
+++ b/src/heap/array-buffer-tracker.h
@@ -5,71 +5,97 @@
 #ifndef V8_HEAP_ARRAY_BUFFER_TRACKER_H_
 #define V8_HEAP_ARRAY_BUFFER_TRACKER_H_
 
-#include <map>
+#include <unordered_map>
 
+#include "src/allocation.h"
 #include "src/base/platform/mutex.h"
 #include "src/globals.h"
 
 namespace v8 {
 namespace internal {
 
-// Forward declarations.
 class Heap;
 class JSArrayBuffer;
+class Page;
 
-class ArrayBufferTracker {
+class ArrayBufferTracker : public AllStatic {
  public:
-  explicit ArrayBufferTracker(Heap* heap) : heap_(heap) {}
-  ~ArrayBufferTracker();
-
-  inline Heap* heap() { return heap_; }
+  enum ProcessingMode {
+    kUpdateForwardedRemoveOthers,
+    kUpdateForwardedKeepOthers,
+  };
 
   // The following methods are used to track raw C++ pointers to externally
   // allocated memory used as backing store in live array buffers.
 
-  // A new ArrayBuffer was created with |data| as backing store.
-  void RegisterNew(JSArrayBuffer* buffer);
+  // Register/unregister a new JSArrayBuffer |buffer| for tracking. Guards all
+  // access to the tracker by taking the page lock for the corresponding page.
+  inline static void RegisterNew(Heap* heap, JSArrayBuffer* buffer);
+  inline static void Unregister(Heap* heap, JSArrayBuffer* buffer);
 
-  // The backing store |data| is no longer owned by V8.
-  void Unregister(JSArrayBuffer* buffer);
+  // Frees all backing store pointers for dead JSArrayBuffers in new space.
+  // Does not take any locks and can only be called during Scavenge.
+  static void FreeDeadInNewSpace(Heap* heap);
 
-  // A live ArrayBuffer was discovered during marking/scavenge.
-  void MarkLive(JSArrayBuffer* buffer);
+  // Frees all backing store pointers for dead JSArrayBuffer on a given page.
+  // Requires marking information to be present. Requires the page lock to be
+  // taken by the caller.
+  static void FreeDead(Page* page);
 
-  // Frees all backing store pointers that weren't discovered in the previous
-  // marking or scavenge phase.
-  void FreeDead(bool from_scavenge);
+  // Frees all remaining, live or dead, array buffers on a page. Only useful
+  // during tear down.
+  static void FreeAll(Page* page);
 
-  // Prepare for a new scavenge phase. A new marking phase is implicitly
-  // prepared by finishing the previous one.
-  void PrepareDiscoveryInNewSpace();
+  // Processes all array buffers on a given page. |mode| specifies the action
+  // to perform on the buffers. Returns whether the tracker is empty or not.
+  static bool ProcessBuffers(Page* page, ProcessingMode mode);
 
-  // An ArrayBuffer moved from new space to old space.
-  void Promote(JSArrayBuffer* buffer);
+  // Returns whether a buffer is currently tracked.
+  static bool IsTracked(JSArrayBuffer* buffer);
+};
+
+// LocalArrayBufferTracker tracks internalized array buffers.
+//
+// Never use directly but instead always call through |ArrayBufferTracker|.
+class LocalArrayBufferTracker {
+ public:
+  typedef std::pair<void*, size_t> Value;
+  typedef JSArrayBuffer* Key;
+
+  enum CallbackResult { kKeepEntry, kUpdateEntry, kRemoveEntry };
+  enum FreeMode { kFreeDead, kFreeAll };
+
+  explicit LocalArrayBufferTracker(Heap* heap) : heap_(heap) {}
+  ~LocalArrayBufferTracker();
+
+  inline void Add(Key key, const Value& value);
+  inline Value Remove(Key key);
+
+  // Frees up array buffers determined by |free_mode|.
+  template <FreeMode free_mode>
+  void Free();
+
+  // Processes buffers one by one. The CallbackResult of the callback decides
+  // what action to take on the buffer.
+  //
+  // Callback should be of type:
+  //   CallbackResult fn(JSArrayBuffer* buffer, JSArrayBuffer** new_buffer);
+  template <typename Callback>
+  inline void Process(Callback callback);
+
+  bool IsEmpty() { return array_buffers_.empty(); }
+
+  bool IsTracked(Key key) {
+    return array_buffers_.find(key) != array_buffers_.end();
+  }
 
  private:
-  base::Mutex mutex_;
+  typedef std::unordered_map<Key, Value> TrackingMap;
+
   Heap* heap_;
-
-  // |live_array_buffers_| maps externally allocated memory used as backing
-  // store for ArrayBuffers to the length of the respective memory blocks.
-  //
-  // At the beginning of mark/compact, |not_yet_discovered_array_buffers_| is
-  // a copy of |live_array_buffers_| and we remove pointers as we discover live
-  // ArrayBuffer objects during marking. At the end of mark/compact, the
-  // remaining memory blocks can be freed.
-  std::map<void*, size_t> live_array_buffers_;
-  std::map<void*, size_t> not_yet_discovered_array_buffers_;
-
-  // To be able to free memory held by ArrayBuffers during scavenge as well, we
-  // have a separate list of allocated memory held by ArrayBuffers in new space.
-  //
-  // Since mark/compact also evacuates the new space, all pointers in the
-  // |live_array_buffers_for_scavenge_| list are also in the
-  // |live_array_buffers_| list.
-  std::map<void*, size_t> live_array_buffers_for_scavenge_;
-  std::map<void*, size_t> not_yet_discovered_array_buffers_for_scavenge_;
+  TrackingMap array_buffers_;
 };
+
 }  // namespace internal
 }  // namespace v8
 #endif  // V8_HEAP_ARRAY_BUFFER_TRACKER_H_
diff --git a/src/heap/gc-tracer.cc b/src/heap/gc-tracer.cc
index 4bae0a4..2b1f06a 100644
--- a/src/heap/gc-tracer.cc
+++ b/src/heap/gc-tracer.cc
@@ -135,6 +135,26 @@
   previous_ = previous_incremental_mark_compactor_event_ = current_;
 }
 
+void GCTracer::ResetForTesting() {
+  cumulative_incremental_marking_steps_ = 0.0;
+  cumulative_incremental_marking_bytes_ = 0.0;
+  cumulative_incremental_marking_duration_ = 0.0;
+  cumulative_pure_incremental_marking_duration_ = 0.0;
+  longest_incremental_marking_step_ = 0.0;
+  cumulative_incremental_marking_finalization_steps_ = 0.0;
+  cumulative_incremental_marking_finalization_duration_ = 0.0;
+  longest_incremental_marking_finalization_step_ = 0.0;
+  cumulative_marking_duration_ = 0.0;
+  cumulative_sweeping_duration_ = 0.0;
+  allocation_time_ms_ = 0.0;
+  new_space_allocation_counter_bytes_ = 0.0;
+  old_generation_allocation_counter_bytes_ = 0.0;
+  allocation_duration_since_gc_ = 0.0;
+  new_space_allocation_in_bytes_since_gc_ = 0.0;
+  old_generation_allocation_in_bytes_since_gc_ = 0.0;
+  combined_mark_compact_speed_cache_ = 0.0;
+  start_counter_ = 0;
+}
 
 void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
                      const char* collector_reason) {
diff --git a/src/heap/gc-tracer.h b/src/heap/gc-tracer.h
index a657f15..ed07750 100644
--- a/src/heap/gc-tracer.h
+++ b/src/heap/gc-tracer.h
@@ -372,6 +372,8 @@
   static double AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
                              const BytesAndDuration& initial, double time_ms);
 
+  void ResetForTesting();
+
  private:
   // Print one detailed trace line in name=value format.
   // TODO(ernstm): Move to Heap.
diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h
index f9c9235..d6c509e 100644
--- a/src/heap/heap-inl.h
+++ b/src/heap/heap-inl.h
@@ -393,14 +393,30 @@
   return OldGenerationSpaceAvailable() < 0;
 }
 
-
+template <PromotionMode promotion_mode>
 bool Heap::ShouldBePromoted(Address old_address, int object_size) {
   Page* page = Page::FromAddress(old_address);
   Address age_mark = new_space_.age_mark();
+
+  if (promotion_mode == PROMOTE_MARKED) {
+    MarkBit mark_bit = Marking::MarkBitFrom(old_address);
+    if (!Marking::IsWhite(mark_bit)) {
+      return true;
+    }
+  }
+
   return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
          (!page->ContainsLimit(age_mark) || old_address < age_mark);
 }
 
+PromotionMode Heap::CurrentPromotionMode() {
+  if (incremental_marking()->IsMarking()) {
+    return PROMOTE_MARKED;
+  } else {
+    return DEFAULT_PROMOTION;
+  }
+}
+
 void Heap::RecordWrite(Object* object, int offset, Object* o) {
   if (!InNewSpace(o) || !object->IsHeapObject() || InNewSpace(object)) {
     return;
@@ -460,6 +476,31 @@
             static_cast<size_t>(byte_size / kPointerSize));
 }
 
+bool Heap::PurgeLeftTrimmedObject(Object** object) {
+  HeapObject* current = reinterpret_cast<HeapObject*>(*object);
+  const MapWord map_word = current->map_word();
+  if (current->IsFiller() && !map_word.IsForwardingAddress()) {
+#ifdef DEBUG
+    // We need to find a FixedArrayBase map after walking the fillers.
+    while (current->IsFiller()) {
+      Address next = reinterpret_cast<Address>(current);
+      if (current->map() == one_pointer_filler_map()) {
+        next += kPointerSize;
+      } else if (current->map() == two_pointer_filler_map()) {
+        next += 2 * kPointerSize;
+      } else {
+        next += current->Size();
+      }
+      current = reinterpret_cast<HeapObject*>(next);
+    }
+    DCHECK(current->IsFixedArrayBase());
+#endif  // DEBUG
+    *object = nullptr;
+    return true;
+  }
+  return false;
+}
+
 template <Heap::FindMementoMode mode>
 AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
   // Check if there is potentially a memento behind the object. If
@@ -510,7 +551,7 @@
 
 template <Heap::UpdateAllocationSiteMode mode>
 void Heap::UpdateAllocationSite(HeapObject* object,
-                                HashMap* pretenuring_feedback) {
+                                base::HashMap* pretenuring_feedback) {
   DCHECK(InFromSpace(object));
   if (!FLAG_allocation_site_pretenuring ||
       !AllocationSite::CanTrack(object->map()->instance_type()))
@@ -538,7 +579,7 @@
     // to dereference the allocation site and rather have to postpone all checks
     // till actually merging the data.
     Address key = memento_candidate->GetAllocationSiteUnchecked();
-    HashMap::Entry* e =
+    base::HashMap::Entry* e =
         pretenuring_feedback->LookupOrInsert(key, ObjectHash(key));
     DCHECK(e != nullptr);
     (*bit_cast<intptr_t*>(&e->value))++;
@@ -596,12 +637,12 @@
   for (int i = 0; i < new_space_strings_.length(); ++i) {
     Object* obj = Object::cast(new_space_strings_[i]);
     DCHECK(heap_->InNewSpace(obj));
-    DCHECK(obj != heap_->the_hole_value());
+    DCHECK(!obj->IsTheHole(heap_->isolate()));
   }
   for (int i = 0; i < old_space_strings_.length(); ++i) {
     Object* obj = Object::cast(old_space_strings_[i]);
     DCHECK(!heap_->InNewSpace(obj));
-    DCHECK(obj != heap_->the_hole_value());
+    DCHECK(!obj->IsTheHole(heap_->isolate()));
   }
 #endif
 }
@@ -710,6 +751,17 @@
   set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
 }
 
+int Heap::GetNextTemplateSerialNumber() {
+  int next_serial_number = next_template_serial_number()->value() + 1;
+  set_next_template_serial_number(Smi::FromInt(next_serial_number));
+  return next_serial_number;
+}
+
+void Heap::SetSerializedTemplates(FixedArray* templates) {
+  DCHECK_EQ(empty_fixed_array(), serialized_templates());
+  set_serialized_templates(templates);
+}
+
 AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
     : heap_(isolate->heap()) {
   heap_->always_allocate_scope_count_.Increment(1);
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index c8f1557..c59a8d3 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -17,7 +17,7 @@
 #include "src/debug/debug.h"
 #include "src/deoptimizer.h"
 #include "src/global-handles.h"
-#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/array-buffer-tracker-inl.h"
 #include "src/heap/gc-idle-time-handler.h"
 #include "src/heap/gc-tracer.h"
 #include "src/heap/incremental-marking.h"
@@ -32,7 +32,6 @@
 #include "src/heap/scavenger-inl.h"
 #include "src/heap/store-buffer.h"
 #include "src/interpreter/interpreter.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/regexp/jsregexp.h"
 #include "src/runtime-profiler.h"
 #include "src/snapshot/natives.h"
@@ -69,8 +68,9 @@
 };
 
 Heap::Heap()
-    : amount_of_external_allocated_memory_(0),
-      amount_of_external_allocated_memory_at_last_global_gc_(0),
+    : external_memory_(0),
+      external_memory_limit_(kExternalAllocationLimit),
+      external_memory_at_last_mark_compact_(0),
       isolate_(nullptr),
       code_range_size_(0),
       // semispace_size_ should be a power of 2 and old_generation_size_ should
@@ -160,7 +160,6 @@
       gc_callbacks_depth_(0),
       deserialization_complete_(false),
       strong_roots_list_(NULL),
-      array_buffer_tracker_(NULL),
       heap_iterator_depth_(0),
       force_oom_(false) {
 // Allow build-time customization of the max semispace size. Building
@@ -385,9 +384,8 @@
                          ", committed: %6" V8PRIdPTR " KB\n",
                this->SizeOfObjects() / KB, this->Available() / KB,
                this->CommittedMemory() / KB);
-  PrintIsolate(
-      isolate_, "External memory reported: %6" V8PRIdPTR " KB\n",
-      static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
+  PrintIsolate(isolate_, "External memory reported: %6" V8PRIdPTR " KB\n",
+               static_cast<intptr_t>(external_memory_ / KB));
   PrintIsolate(isolate_, "Total time spent in GC  : %.1f ms\n",
                total_gc_time_ms_);
 }
@@ -502,11 +500,10 @@
   }
 }
 
-
 void Heap::MergeAllocationSitePretenuringFeedback(
-    const HashMap& local_pretenuring_feedback) {
+    const base::HashMap& local_pretenuring_feedback) {
   AllocationSite* site = nullptr;
-  for (HashMap::Entry* local_entry = local_pretenuring_feedback.Start();
+  for (base::HashMap::Entry* local_entry = local_pretenuring_feedback.Start();
        local_entry != nullptr;
        local_entry = local_pretenuring_feedback.Next(local_entry)) {
     site = reinterpret_cast<AllocationSite*>(local_entry->key);
@@ -535,8 +532,8 @@
 class Heap::PretenuringScope {
  public:
   explicit PretenuringScope(Heap* heap) : heap_(heap) {
-    heap_->global_pretenuring_feedback_ =
-        new HashMap(HashMap::PointersMatch, kInitialFeedbackCapacity);
+    heap_->global_pretenuring_feedback_ = new base::HashMap(
+        base::HashMap::PointersMatch, kInitialFeedbackCapacity);
   }
 
   ~PretenuringScope() {
@@ -562,7 +559,7 @@
 
     // Step 1: Digest feedback for recorded allocation sites.
     bool maximum_size_scavenge = MaximumSizeScavenge();
-    for (HashMap::Entry* e = global_pretenuring_feedback_->Start();
+    for (base::HashMap::Entry* e = global_pretenuring_feedback_->Start();
          e != nullptr; e = global_pretenuring_feedback_->Next(e)) {
       allocation_sites++;
       site = reinterpret_cast<AllocationSite*>(e->key);
@@ -1111,9 +1108,11 @@
     // Visit all HeapObject pointers in [start, end).
     for (Object** p = start; p < end; p++) {
       if ((*p)->IsHeapObject()) {
+        HeapObject* object = HeapObject::cast(*p);
+        Isolate* isolate = object->GetIsolate();
         // Check that the string is actually internalized.
-        CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
-              (*p)->IsInternalizedString());
+        CHECK(object->IsTheHole(isolate) || object->IsUndefined(isolate) ||
+              object->IsInternalizedString());
       }
     }
   }
@@ -1212,12 +1211,12 @@
   }
 
   Object* context = native_contexts_list();
-  while (!context->IsUndefined()) {
+  while (!context->IsUndefined(isolate())) {
     // GC can happen when the context is not fully initialized,
     // so the cache can be undefined.
     Object* cache =
         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
-    if (!cache->IsUndefined()) {
+    if (!cache->IsUndefined(isolate())) {
       NormalizedMapCache::cast(cache)->Clear();
     }
     context = Context::cast(context)->next_context_link();
@@ -1340,8 +1339,8 @@
   intptr_t old_gen_size = PromotedSpaceSizeOfObjects();
   if (collector == MARK_COMPACTOR) {
     // Register the amount of external allocated memory.
-    amount_of_external_allocated_memory_at_last_global_gc_ =
-        amount_of_external_allocated_memory_;
+    external_memory_at_last_mark_compact_ = external_memory_;
+    external_memory_limit_ = external_memory_ + kExternalAllocationLimit;
     SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
   } else if (HasLowYoungGenerationAllocationRate() &&
              old_generation_size_configured_) {
@@ -1612,6 +1611,8 @@
   // Pause the inline allocation steps.
   PauseAllocationObserversScope pause_observers(this);
 
+  mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
+
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
 #endif
@@ -1626,7 +1627,12 @@
 
   scavenge_collector_->SelectScavengingVisitorsTable();
 
-  array_buffer_tracker()->PrepareDiscoveryInNewSpace();
+  if (UsingEmbedderHeapTracer()) {
+    // Register found wrappers with embedder so it can add them to its marking
+    // deque and correctly manage the case when v8 scavenger collects the
+    // wrappers by either keeping wrappables alive, or cleaning marking deque.
+    mark_compact_collector()->RegisterWrappersWithEmbedderHeapTracer();
+  }
 
   // Flip the semispaces.  After flipping, to space is empty, from space has
   // live objects.
@@ -1653,6 +1659,7 @@
   Address new_space_front = new_space_.ToSpaceStart();
   promotion_queue_.Initialize();
 
+  PromotionMode promotion_mode = CurrentPromotionMode();
   ScavengeVisitor scavenge_visitor(this);
 
   if (FLAG_scavenge_reclaim_unmodified_objects) {
@@ -1669,8 +1676,21 @@
   {
     // Copy objects reachable from the old generation.
     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
-    RememberedSet<OLD_TO_NEW>::IterateWithWrapper(this,
-                                                  Scavenger::ScavengeObject);
+    RememberedSet<OLD_TO_NEW>::Iterate(this, [this](Address addr) {
+      return Scavenger::CheckAndScavengeObject(this, addr);
+    });
+
+    RememberedSet<OLD_TO_NEW>::IterateTyped(
+        this, [this](SlotType type, Address host_addr, Address addr) {
+          return UpdateTypedSlotHelper::UpdateTypedSlot(
+              isolate(), type, addr, [this](Object** addr) {
+                // We expect that objects referenced by code are long living.
+                // If we do not force promotion, then we need to clear
+                // old_to_new slots in dead code objects after mark-compact.
+                return Scavenger::CheckAndScavengeObject(
+                    this, reinterpret_cast<Address>(addr));
+              });
+        });
   }
 
   {
@@ -1692,7 +1712,8 @@
 
   {
     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
-    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+    new_space_front =
+        DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
   }
 
   if (FLAG_scavenge_reclaim_unmodified_objects) {
@@ -1701,12 +1722,14 @@
 
     isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
         &scavenge_visitor);
-    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+    new_space_front =
+        DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
   } else {
     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OBJECT_GROUPS);
     while (isolate()->global_handles()->IterateObjectGroups(
         &scavenge_visitor, &IsUnscavengedHeapObject)) {
-      new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+      new_space_front =
+          DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
     }
     isolate()->global_handles()->RemoveObjectGroups();
     isolate()->global_handles()->RemoveImplicitRefGroups();
@@ -1716,7 +1739,8 @@
 
     isolate()->global_handles()->IterateNewSpaceWeakIndependentRoots(
         &scavenge_visitor);
-    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+    new_space_front =
+        DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
   }
 
   UpdateNewSpaceReferencesInExternalStringTable(
@@ -1734,7 +1758,7 @@
   // Set age mark.
   new_space_.set_age_mark(new_space_.top());
 
-  array_buffer_tracker()->FreeDead(true);
+  ArrayBufferTracker::FreeDeadInNewSpace(this);
 
   // Update how much has survived scavenge.
   IncrementYoungSurvivorsCounter(static_cast<int>(
@@ -1898,9 +1922,9 @@
   external_string_table_.Iterate(&external_string_table_visitor);
 }
 
-
 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
-                         Address new_space_front) {
+                         Address new_space_front,
+                         PromotionMode promotion_mode) {
   do {
     SemiSpace::AssertValidRange(new_space_front, new_space_.top());
     // The addresses new_space_front and new_space_.top() define a
@@ -1909,8 +1933,14 @@
     while (new_space_front != new_space_.top()) {
       if (!Page::IsAlignedToPageSize(new_space_front)) {
         HeapObject* object = HeapObject::FromAddress(new_space_front);
-        new_space_front +=
-            StaticScavengeVisitor::IterateBody(object->map(), object);
+        if (promotion_mode == PROMOTE_MARKED) {
+          new_space_front += StaticScavengeVisitor<PROMOTE_MARKED>::IterateBody(
+              object->map(), object);
+        } else {
+          new_space_front +=
+              StaticScavengeVisitor<DEFAULT_PROMOTION>::IterateBody(
+                  object->map(), object);
+        }
       } else {
         new_space_front = Page::FromAllocationAreaAddress(new_space_front)
                               ->next_page()
@@ -2014,12 +2044,12 @@
 
 
 void Heap::RegisterNewArrayBuffer(JSArrayBuffer* buffer) {
-  return array_buffer_tracker()->RegisterNew(buffer);
+  ArrayBufferTracker::RegisterNew(this, buffer);
 }
 
 
 void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
-  return array_buffer_tracker()->Unregister(buffer);
+  ArrayBufferTracker::Unregister(this, buffer);
 }
 
 
@@ -2306,8 +2336,9 @@
     }
 
     {  // Create a separate external one byte string map for native sources.
-      AllocationResult allocation = AllocateMap(EXTERNAL_ONE_BYTE_STRING_TYPE,
-                                                ExternalOneByteString::kSize);
+      AllocationResult allocation =
+          AllocateMap(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE,
+                      ExternalOneByteString::kShortSize);
       if (!allocation.To(&obj)) return false;
       Map* map = Map::cast(obj);
       map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
@@ -2794,6 +2825,20 @@
   }
 
   {
+    Handle<FixedArray> empty_literals_array =
+        factory->NewFixedArray(1, TENURED);
+    empty_literals_array->set(0, *factory->empty_fixed_array());
+    set_empty_literals_array(*empty_literals_array);
+  }
+
+  {
+    Handle<FixedArray> empty_sloppy_arguments_elements =
+        factory->NewFixedArray(2, TENURED);
+    empty_sloppy_arguments_elements->set_map(sloppy_arguments_elements_map());
+    set_empty_sloppy_arguments_elements(*empty_sloppy_arguments_elements);
+  }
+
+  {
     Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
     set_empty_weak_cell(*cell);
     cell->clear();
@@ -2825,6 +2870,7 @@
 
   // Handling of script id generation is in Heap::NextScriptId().
   set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
+  set_next_template_serial_number(Smi::FromInt(0));
 
   // Allocate the empty script.
   Handle<Script> script = factory->NewScript(factory->empty_string());
@@ -2851,6 +2897,8 @@
       handle(Smi::FromInt(Isolate::kArrayProtectorValid), isolate()));
   set_species_protector(*species_cell);
 
+  set_serialized_templates(empty_fixed_array());
+
   set_weak_stack_trace_list(Smi::FromInt(0));
 
   set_noscript_shared_function_infos(Smi::FromInt(0));
@@ -2888,6 +2936,7 @@
     case kRetainedMapsRootIndex:
     case kNoScriptSharedFunctionInfosRootIndex:
     case kWeakStackTraceListRootIndex:
+    case kSerializedTemplatesRootIndex:
 // Smi values
 #define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
       SMI_ROOT_LIST(SMI_ENTRY)
@@ -3073,14 +3122,8 @@
 
   if (lo_space()->Contains(object)) return false;
 
-  Page* page = Page::FromAddress(address);
-  // We can move the object start if:
-  // (1) the object is not in old space,
-  // (2) the page of the object was already swept,
-  // (3) the page was already concurrently swept. This case is an optimization
-  // for concurrent sweeping. The WasSwept predicate for concurrently swept
-  // pages is set after sweeping all pages.
-  return !InOldSpace(object) || page->SweepingDone();
+  // We can move the object start if the page was already swept.
+  return Page::FromAddress(address)->SweepingDone();
 }
 
 
@@ -3105,6 +3148,7 @@
 
 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
                                          int elements_to_trim) {
+  CHECK_NOT_NULL(object);
   DCHECK(!object->IsFixedTypedArrayBase());
   DCHECK(!object->IsByteArray());
   const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
@@ -3323,8 +3367,7 @@
   result->set_map_no_write_barrier(code_map());
   Code* code = Code::cast(result);
   DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
-  DCHECK(memory_allocator()->code_range() == NULL ||
-         !memory_allocator()->code_range()->valid() ||
+  DCHECK(!memory_allocator()->code_range()->valid() ||
          memory_allocator()->code_range()->contains(code->address()) ||
          object_size <= code_space()->AreaSize());
   code->set_gc_metadata(Smi::FromInt(0));
@@ -3350,8 +3393,7 @@
 
   // Relocate the copy.
   DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
-  DCHECK(memory_allocator()->code_range() == NULL ||
-         !memory_allocator()->code_range()->valid() ||
+  DCHECK(!memory_allocator()->code_range()->valid() ||
          memory_allocator()->code_range()->contains(code->address()) ||
          obj_size <= code_space()->AreaSize());
   new_code->Relocate(new_addr - old_addr);
@@ -3382,60 +3424,6 @@
   return copy;
 }
 
-AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
-  // Allocate ByteArray before the Code object, so that we do not risk
-  // leaving uninitialized Code object (and breaking the heap).
-  ByteArray* reloc_info_array = nullptr;
-  {
-    AllocationResult allocation =
-        AllocateByteArray(reloc_info.length(), TENURED);
-    if (!allocation.To(&reloc_info_array)) return allocation;
-  }
-
-  int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
-
-  int new_obj_size = Code::SizeFor(new_body_size);
-
-  Address old_addr = code->address();
-
-  size_t relocation_offset =
-      static_cast<size_t>(code->instruction_end() - old_addr);
-
-  HeapObject* result = nullptr;
-  AllocationResult allocation = AllocateRaw(new_obj_size, CODE_SPACE);
-  if (!allocation.To(&result)) return allocation;
-
-  // Copy code object.
-  Address new_addr = result->address();
-
-  // Copy header and instructions.
-  CopyBytes(new_addr, old_addr, relocation_offset);
-
-  Code* new_code = Code::cast(result);
-  new_code->set_relocation_info(reloc_info_array);
-
-  // Copy patched rinfo.
-  CopyBytes(new_code->relocation_start(), reloc_info.start(),
-            static_cast<size_t>(reloc_info.length()));
-
-  // Relocate the copy.
-  DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
-  DCHECK(memory_allocator()->code_range() == NULL ||
-         !memory_allocator()->code_range()->valid() ||
-         memory_allocator()->code_range()->contains(code->address()) ||
-         new_obj_size <= code_space()->AreaSize());
-
-  new_code->Relocate(new_addr - old_addr);
-  // We have to iterate over over the object and process its pointers when
-  // black allocation is on.
-  incremental_marking()->IterateBlackObject(new_code);
-#ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) code->ObjectVerify();
-#endif
-  return new_code;
-}
-
-
 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
                                        AllocationSite* allocation_site) {
   memento->set_map_no_write_barrier(allocation_memento_map());
@@ -3533,7 +3521,8 @@
   // Initialize the JSObject.
   InitializeJSObjectFromMap(js_obj, properties, map);
   DCHECK(js_obj->HasFastElements() || js_obj->HasFixedTypedArrayElements() ||
-         js_obj->HasFastStringWrapperElements());
+         js_obj->HasFastStringWrapperElements() ||
+         js_obj->HasFastArgumentsElements());
   return js_obj;
 }
 
@@ -3559,10 +3548,11 @@
   // Make the clone.
   Map* map = source->map();
 
-  // We can only clone regexps, normal objects, api objects or arrays. Copying
-  // anything else will break invariants.
+  // We can only clone regexps, normal objects, api objects, errors or arrays.
+  // Copying anything else will break invariants.
   CHECK(map->instance_type() == JS_REGEXP_TYPE ||
         map->instance_type() == JS_OBJECT_TYPE ||
+        map->instance_type() == JS_ERROR_TYPE ||
         map->instance_type() == JS_ARRAY_TYPE ||
         map->instance_type() == JS_API_OBJECT_TYPE ||
         map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE);
@@ -4420,8 +4410,36 @@
 }
 
 void Heap::CollectGarbageOnMemoryPressure(const char* source) {
+  const int kGarbageThresholdInBytes = 8 * MB;
+  const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
+  // This constant is the maximum response time in RAIL performance model.
+  const double kMaxMemoryPressurePauseMs = 100;
+
+  double start = MonotonicallyIncreasingTimeInMs();
   CollectAllGarbage(kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
-                    source);
+                    source, kGCCallbackFlagCollectAllAvailableGarbage);
+  double end = MonotonicallyIncreasingTimeInMs();
+
+  // Estimate how much memory we can free.
+  int64_t potential_garbage =
+      (CommittedMemory() - SizeOfObjects()) + external_memory_;
+  // If we can potentially free large amount of memory, then start GC right
+  // away instead of waiting for memory reducer.
+  if (potential_garbage >= kGarbageThresholdInBytes &&
+      potential_garbage >=
+          CommittedMemory() * kGarbageThresholdAsFractionOfTotalMemory) {
+    // If we spent less than half of the time budget, then perform full GC
+    // Otherwise, start incremental marking.
+    if (end - start < kMaxMemoryPressurePauseMs / 2) {
+      CollectAllGarbage(
+          kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask, source,
+          kGCCallbackFlagCollectAllAvailableGarbage);
+    } else {
+      if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
+        StartIdleIncrementalMarking();
+      }
+    }
+  }
 }
 
 void Heap::MemoryPressureNotification(MemoryPressureLevel level,
@@ -4444,6 +4462,15 @@
   }
 }
 
+void Heap::CollectCodeStatistics() {
+  PagedSpace::ResetCodeAndMetadataStatistics(isolate());
+  // We do not look for code in new space, or map space.  If code
+  // somehow ends up in those spaces, we would miss it here.
+  code_space_->CollectCodeStatistics();
+  old_space_->CollectCodeStatistics();
+  lo_space_->CollectCodeStatistics();
+}
+
 #ifdef DEBUG
 
 void Heap::Print() {
@@ -4458,11 +4485,7 @@
 
 void Heap::ReportCodeStatistics(const char* title) {
   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
-  PagedSpace::ResetCodeStatistics(isolate());
-  // We do not look for code in new space, map space, or old space.  If code
-  // somehow ends up in those spaces, we would miss it here.
-  code_space_->CollectCodeStatistics();
-  lo_space_->CollectCodeStatistics();
+  CollectCodeStatistics();
   PagedSpace::ReportCodeStatistics(isolate());
 }
 
@@ -4632,10 +4655,8 @@
 
 void Heap::ZapFromSpace() {
   if (!new_space_.IsFromSpaceCommitted()) return;
-  NewSpacePageIterator it(new_space_.FromSpaceStart(),
-                          new_space_.FromSpaceEnd());
-  while (it.has_next()) {
-    Page* page = it.next();
+  for (Page* page : NewSpacePageRange(new_space_.FromSpaceStart(),
+                                      new_space_.FromSpaceEnd())) {
     for (Address cursor = page->area_start(), limit = page->area_end();
          cursor < limit; cursor += kPointerSize) {
       Memory::Address_at(cursor) = kFromSpaceZapValue;
@@ -4759,49 +4780,6 @@
   v->Synchronize(VisitorSynchronization::kSmiRootList);
 }
 
-// We cannot avoid stale handles to left-trimmed objects, but can only make
-// sure all handles still needed are updated. Filter out a stale pointer
-// and clear the slot to allow post processing of handles (needed because
-// the sweeper might actually free the underlying page).
-class FixStaleLeftTrimmedHandlesVisitor : public ObjectVisitor {
- public:
-  explicit FixStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
-    USE(heap_);
-  }
-
-  void VisitPointer(Object** p) override { FixHandle(p); }
-
-  void VisitPointers(Object** start, Object** end) override {
-    for (Object** p = start; p < end; p++) FixHandle(p);
-  }
-
- private:
-  inline void FixHandle(Object** p) {
-    HeapObject* current = reinterpret_cast<HeapObject*>(*p);
-    if (!current->IsHeapObject()) return;
-    const MapWord map_word = current->map_word();
-    if (!map_word.IsForwardingAddress() && current->IsFiller()) {
-#ifdef DEBUG
-      // We need to find a FixedArrayBase map after walking the fillers.
-      while (current->IsFiller()) {
-        Address next = reinterpret_cast<Address>(current);
-        if (current->map() == heap_->one_pointer_filler_map()) {
-          next += kPointerSize;
-        } else if (current->map() == heap_->two_pointer_filler_map()) {
-          next += 2 * kPointerSize;
-        } else {
-          next += current->Size();
-        }
-        current = reinterpret_cast<HeapObject*>(next);
-      }
-      DCHECK(current->IsFixedArrayBase());
-#endif  // DEBUG
-      *p = nullptr;
-    }
-  }
-
-  Heap* heap_;
-};
 
 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
@@ -4817,13 +4795,13 @@
   v->Synchronize(VisitorSynchronization::kTop);
   Relocatable::Iterate(isolate_, v);
   v->Synchronize(VisitorSynchronization::kRelocatable);
+  isolate_->debug()->Iterate(v);
+  v->Synchronize(VisitorSynchronization::kDebug);
 
   isolate_->compilation_cache()->Iterate(v);
   v->Synchronize(VisitorSynchronization::kCompilationCache);
 
   // Iterate over local handles in handle scopes.
-  FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
-  isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
   isolate_->handle_scope_implementer()->Iterate(v);
   isolate_->IterateDeferredHandles(v);
   v->Synchronize(VisitorSynchronization::kHandleScope);
@@ -5064,11 +5042,8 @@
 
 
 int64_t Heap::PromotedExternalMemorySize() {
-  if (amount_of_external_allocated_memory_ <=
-      amount_of_external_allocated_memory_at_last_global_gc_)
-    return 0;
-  return amount_of_external_allocated_memory_ -
-         amount_of_external_allocated_memory_at_last_global_gc_;
+  if (external_memory_ <= external_memory_at_last_mark_compact_) return 0;
+  return external_memory_ - external_memory_at_last_mark_compact_;
 }
 
 
@@ -5242,7 +5217,8 @@
 
 static void InitializeGCOnce() {
   Scavenger::Initialize();
-  StaticScavengeVisitor::Initialize();
+  StaticScavengeVisitor<DEFAULT_PROMOTION>::Initialize();
+  StaticScavengeVisitor<PROMOTE_MARKED>::Initialize();
   MarkCompactCollector::Initialize();
 }
 
@@ -5335,8 +5311,6 @@
 
   scavenge_job_ = new ScavengeJob();
 
-  array_buffer_tracker_ = new ArrayBufferTracker(this);
-
   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
 
@@ -5399,8 +5373,9 @@
   // All pages right after bootstrapping must be marked as never-evacuate.
   PagedSpaces spaces(this);
   for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
-    PageIterator it(s);
-    while (it.has_next()) CHECK(it.next()->NeverEvacuate());
+    for (Page* p : *s) {
+      CHECK(p->NeverEvacuate());
+    }
   }
 #endif  // DEBUG
 }
@@ -5496,9 +5471,6 @@
   delete scavenge_job_;
   scavenge_job_ = nullptr;
 
-  delete array_buffer_tracker_;
-  array_buffer_tracker_ = nullptr;
-
   isolate_->global_handles()->TearDown();
 
   external_string_table_.TearDown();
@@ -5605,6 +5577,33 @@
   return DependentCode::cast(empty_fixed_array());
 }
 
+namespace {
+void CompactWeakFixedArray(Object* object) {
+  if (object->IsWeakFixedArray()) {
+    WeakFixedArray* array = WeakFixedArray::cast(object);
+    array->Compact<WeakFixedArray::NullCallback>();
+  }
+}
+}  // anonymous namespace
+
+void Heap::CompactWeakFixedArrays() {
+  // Find known WeakFixedArrays and compact them.
+  HeapIterator iterator(this);
+  for (HeapObject* o = iterator.next(); o != NULL; o = iterator.next()) {
+    if (o->IsPrototypeInfo()) {
+      Object* prototype_users = PrototypeInfo::cast(o)->prototype_users();
+      if (prototype_users->IsWeakFixedArray()) {
+        WeakFixedArray* array = WeakFixedArray::cast(prototype_users);
+        array->Compact<JSObject::PrototypeRegistryCompactionCallback>();
+      }
+    } else if (o->IsScript()) {
+      CompactWeakFixedArray(Script::cast(o)->shared_function_infos());
+    }
+  }
+  CompactWeakFixedArray(noscript_shared_function_infos());
+  CompactWeakFixedArray(script_list());
+  CompactWeakFixedArray(weak_stack_trace_list());
+}
 
 void Heap::AddRetainedMap(Handle<Map> map) {
   Handle<WeakCell> cell = Map::WeakCellForMap(map);
@@ -5932,14 +5931,14 @@
   // No iterator means we are done.
   if (object_iterator_ == nullptr) return nullptr;
 
-  if (HeapObject* obj = object_iterator_->next_object()) {
+  if (HeapObject* obj = object_iterator_->Next()) {
     // If the current iterator has more objects we are fine.
     return obj;
   } else {
     // Go though the spaces looking for one that has objects.
     while (space_iterator_->has_next()) {
       object_iterator_ = space_iterator_->next();
-      if (HeapObject* obj = object_iterator_->next_object()) {
+      if (HeapObject* obj = object_iterator_->Next()) {
         return obj;
       }
     }
@@ -6231,8 +6230,9 @@
 
 void Heap::ExternalStringTable::CleanUp() {
   int last = 0;
+  Isolate* isolate = heap_->isolate();
   for (int i = 0; i < new_space_strings_.length(); ++i) {
-    if (new_space_strings_[i] == heap_->the_hole_value()) {
+    if (new_space_strings_[i]->IsTheHole(isolate)) {
       continue;
     }
     DCHECK(new_space_strings_[i]->IsExternalString());
@@ -6247,7 +6247,7 @@
 
   last = 0;
   for (int i = 0; i < old_space_strings_.length(); ++i) {
-    if (old_space_strings_[i] == heap_->the_hole_value()) {
+    if (old_space_strings_[i]->IsTheHole(isolate)) {
       continue;
     }
     DCHECK(old_space_strings_[i]->IsExternalString());
diff --git a/src/heap/heap.h b/src/heap/heap.h
index 8fdb64a..ed1e652 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -28,68 +28,72 @@
 
 // Defines all the roots in Heap.
 #define STRONG_ROOT_LIST(V)                                                    \
-  V(Map, byte_array_map, ByteArrayMap)                                         \
+  /* Cluster the most popular ones in a few cache lines here at the top.    */ \
+  /* The first 32 entries are most often used in the startup snapshot and   */ \
+  /* can use a shorter representation in the serialization format.          */ \
   V(Map, free_space_map, FreeSpaceMap)                                         \
   V(Map, one_pointer_filler_map, OnePointerFillerMap)                          \
   V(Map, two_pointer_filler_map, TwoPointerFillerMap)                          \
-  /* Cluster the most popular ones in a few cache lines here at the top.    */ \
+  V(Oddball, uninitialized_value, UninitializedValue)                          \
   V(Oddball, undefined_value, UndefinedValue)                                  \
   V(Oddball, the_hole_value, TheHoleValue)                                     \
   V(Oddball, null_value, NullValue)                                            \
   V(Oddball, true_value, TrueValue)                                            \
   V(Oddball, false_value, FalseValue)                                          \
   V(String, empty_string, empty_string)                                        \
-  V(Oddball, uninitialized_value, UninitializedValue)                          \
-  V(Map, cell_map, CellMap)                                                    \
-  V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
-  V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
   V(Map, meta_map, MetaMap)                                                    \
-  V(Map, heap_number_map, HeapNumberMap)                                       \
-  V(Map, mutable_heap_number_map, MutableHeapNumberMap)                        \
-  V(Map, float32x4_map, Float32x4Map)                                          \
-  V(Map, int32x4_map, Int32x4Map)                                              \
-  V(Map, uint32x4_map, Uint32x4Map)                                            \
-  V(Map, bool32x4_map, Bool32x4Map)                                            \
-  V(Map, int16x8_map, Int16x8Map)                                              \
-  V(Map, uint16x8_map, Uint16x8Map)                                            \
-  V(Map, bool16x8_map, Bool16x8Map)                                            \
-  V(Map, int8x16_map, Int8x16Map)                                              \
-  V(Map, uint8x16_map, Uint8x16Map)                                            \
-  V(Map, bool8x16_map, Bool8x16Map)                                            \
-  V(Map, native_context_map, NativeContextMap)                                 \
+  V(Map, byte_array_map, ByteArrayMap)                                         \
   V(Map, fixed_array_map, FixedArrayMap)                                       \
-  V(Map, code_map, CodeMap)                                                    \
-  V(Map, scope_info_map, ScopeInfoMap)                                         \
   V(Map, fixed_cow_array_map, FixedCOWArrayMap)                                \
-  V(Map, fixed_double_array_map, FixedDoubleArrayMap)                          \
-  V(Map, weak_cell_map, WeakCellMap)                                           \
-  V(Map, transition_array_map, TransitionArrayMap)                             \
+  V(Map, hash_table_map, HashTableMap)                                         \
+  V(Map, symbol_map, SymbolMap)                                                \
   V(Map, one_byte_string_map, OneByteStringMap)                                \
   V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap)       \
+  V(Map, scope_info_map, ScopeInfoMap)                                         \
+  V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
+  V(Map, code_map, CodeMap)                                                    \
   V(Map, function_context_map, FunctionContextMap)                             \
+  V(Map, cell_map, CellMap)                                                    \
+  V(Map, weak_cell_map, WeakCellMap)                                           \
+  V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
+  V(Map, foreign_map, ForeignMap)                                              \
+  V(Map, heap_number_map, HeapNumberMap)                                       \
+  V(Map, transition_array_map, TransitionArrayMap)                             \
+  V(FixedArray, empty_literals_array, EmptyLiteralsArray)                      \
   V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
-  V(ByteArray, empty_byte_array, EmptyByteArray)                               \
+  V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap)           \
   V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
+  /* Entries beyond the first 32                                            */ \
   /* The roots above this line should be boring from a GC point of view.    */ \
   /* This means they are never in new space and never on a page that is     */ \
   /* being compacted.                                                       */ \
+  /* Oddballs */                                                               \
   V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel)      \
   V(Oddball, arguments_marker, ArgumentsMarker)                                \
   V(Oddball, exception, Exception)                                             \
   V(Oddball, termination_exception, TerminationException)                      \
   V(Oddball, optimized_out, OptimizedOut)                                      \
   V(Oddball, stale_register, StaleRegister)                                    \
-  V(FixedArray, number_string_cache, NumberStringCache)                        \
-  V(Object, instanceof_cache_function, InstanceofCacheFunction)                \
-  V(Object, instanceof_cache_map, InstanceofCacheMap)                          \
-  V(Object, instanceof_cache_answer, InstanceofCacheAnswer)                    \
-  V(FixedArray, single_character_string_cache, SingleCharacterStringCache)     \
-  V(FixedArray, string_split_cache, StringSplitCache)                          \
-  V(FixedArray, regexp_multiple_cache, RegExpMultipleCache)                    \
-  V(Smi, hash_seed, HashSeed)                                                  \
-  V(Map, hash_table_map, HashTableMap)                                         \
+  /* Context maps */                                                           \
+  V(Map, native_context_map, NativeContextMap)                                 \
+  V(Map, module_context_map, ModuleContextMap)                                 \
+  V(Map, script_context_map, ScriptContextMap)                                 \
+  V(Map, block_context_map, BlockContextMap)                                   \
+  V(Map, catch_context_map, CatchContextMap)                                   \
+  V(Map, with_context_map, WithContextMap)                                     \
+  V(Map, debug_evaluate_context_map, DebugEvaluateContextMap)                  \
+  V(Map, script_context_table_map, ScriptContextTableMap)                      \
+  /* Maps */                                                                   \
+  V(Map, fixed_double_array_map, FixedDoubleArrayMap)                          \
+  V(Map, mutable_heap_number_map, MutableHeapNumberMap)                        \
   V(Map, ordered_hash_table_map, OrderedHashTableMap)                          \
-  V(Map, symbol_map, SymbolMap)                                                \
+  V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap)            \
+  V(Map, message_object_map, JSMessageObjectMap)                               \
+  V(Map, neander_map, NeanderMap)                                              \
+  V(Map, external_map, ExternalMap)                                            \
+  V(Map, bytecode_array_map, BytecodeArrayMap)                                 \
+  /* String maps */                                                            \
+  V(Map, native_source_string_map, NativeSourceStringMap)                      \
   V(Map, string_map, StringMap)                                                \
   V(Map, cons_one_byte_string_map, ConsOneByteStringMap)                       \
   V(Map, cons_string_map, ConsStringMap)                                       \
@@ -99,7 +103,6 @@
   V(Map, external_string_with_one_byte_data_map,                               \
     ExternalStringWithOneByteDataMap)                                          \
   V(Map, external_one_byte_string_map, ExternalOneByteStringMap)               \
-  V(Map, native_source_string_map, NativeSourceStringMap)                      \
   V(Map, short_external_string_map, ShortExternalStringMap)                    \
   V(Map, short_external_string_with_one_byte_data_map,                         \
     ShortExternalStringWithOneByteDataMap)                                     \
@@ -116,6 +119,7 @@
   V(Map, short_external_one_byte_internalized_string_map,                      \
     ShortExternalOneByteInternalizedStringMap)                                 \
   V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap)    \
+  /* Array element maps */                                                     \
   V(Map, fixed_uint8_array_map, FixedUint8ArrayMap)                            \
   V(Map, fixed_int8_array_map, FixedInt8ArrayMap)                              \
   V(Map, fixed_uint16_array_map, FixedUint16ArrayMap)                          \
@@ -125,6 +129,18 @@
   V(Map, fixed_float32_array_map, FixedFloat32ArrayMap)                        \
   V(Map, fixed_float64_array_map, FixedFloat64ArrayMap)                        \
   V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap)             \
+  V(Map, float32x4_map, Float32x4Map)                                          \
+  V(Map, int32x4_map, Int32x4Map)                                              \
+  V(Map, uint32x4_map, Uint32x4Map)                                            \
+  V(Map, bool32x4_map, Bool32x4Map)                                            \
+  V(Map, int16x8_map, Int16x8Map)                                              \
+  V(Map, uint16x8_map, Uint16x8Map)                                            \
+  V(Map, bool16x8_map, Bool16x8Map)                                            \
+  V(Map, int8x16_map, Int8x16Map)                                              \
+  V(Map, uint8x16_map, Uint8x16Map)                                            \
+  V(Map, bool8x16_map, Bool8x16Map)                                            \
+  /* Canonical empty values */                                                 \
+  V(ByteArray, empty_byte_array, EmptyByteArray)                               \
   V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array)        \
   V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array)          \
   V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array)      \
@@ -135,14 +151,57 @@
   V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array)    \
   V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array,                      \
     EmptyFixedUint8ClampedArray)                                               \
-  V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap)            \
-  V(Map, catch_context_map, CatchContextMap)                                   \
-  V(Map, with_context_map, WithContextMap)                                     \
-  V(Map, debug_evaluate_context_map, DebugEvaluateContextMap)                  \
-  V(Map, block_context_map, BlockContextMap)                                   \
-  V(Map, module_context_map, ModuleContextMap)                                 \
-  V(Map, script_context_map, ScriptContextMap)                                 \
-  V(Map, script_context_table_map, ScriptContextTableMap)                      \
+  V(Script, empty_script, EmptyScript)                                         \
+  V(Cell, undefined_cell, UndefinedCell)                                       \
+  V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \
+  V(SeededNumberDictionary, empty_slow_element_dictionary,                     \
+    EmptySlowElementDictionary)                                                \
+  V(TypeFeedbackVector, dummy_vector, DummyVector)                             \
+  V(PropertyCell, empty_property_cell, EmptyPropertyCell)                      \
+  V(WeakCell, empty_weak_cell, EmptyWeakCell)                                  \
+  /* Protectors */                                                             \
+  V(PropertyCell, array_protector, ArrayProtector)                             \
+  V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector)         \
+  V(PropertyCell, has_instance_protector, HasInstanceProtector)                \
+  V(Cell, species_protector, SpeciesProtector)                                 \
+  /* Special numbers */                                                        \
+  V(HeapNumber, nan_value, NanValue)                                           \
+  V(HeapNumber, infinity_value, InfinityValue)                                 \
+  V(HeapNumber, minus_zero_value, MinusZeroValue)                              \
+  V(HeapNumber, minus_infinity_value, MinusInfinityValue)                      \
+  /* Caches */                                                                 \
+  V(FixedArray, number_string_cache, NumberStringCache)                        \
+  V(FixedArray, single_character_string_cache, SingleCharacterStringCache)     \
+  V(FixedArray, string_split_cache, StringSplitCache)                          \
+  V(FixedArray, regexp_multiple_cache, RegExpMultipleCache)                    \
+  V(Object, instanceof_cache_function, InstanceofCacheFunction)                \
+  V(Object, instanceof_cache_map, InstanceofCacheMap)                          \
+  V(Object, instanceof_cache_answer, InstanceofCacheAnswer)                    \
+  V(FixedArray, natives_source_cache, NativesSourceCache)                      \
+  V(FixedArray, experimental_natives_source_cache,                             \
+    ExperimentalNativesSourceCache)                                            \
+  V(FixedArray, extra_natives_source_cache, ExtraNativesSourceCache)           \
+  V(FixedArray, experimental_extra_natives_source_cache,                       \
+    ExperimentalExtraNativesSourceCache)                                       \
+  /* Lists and dictionaries */                                                 \
+  V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames)          \
+  V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary)    \
+  V(Object, symbol_registry, SymbolRegistry)                                   \
+  V(Object, script_list, ScriptList)                                           \
+  V(UnseededNumberDictionary, code_stubs, CodeStubs)                           \
+  V(FixedArray, materialized_objects, MaterializedObjects)                     \
+  V(FixedArray, microtask_queue, MicrotaskQueue)                               \
+  V(FixedArray, detached_contexts, DetachedContexts)                           \
+  V(ArrayList, retained_maps, RetainedMaps)                                    \
+  V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable)           \
+  V(Object, weak_stack_trace_list, WeakStackTraceList)                         \
+  V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos)       \
+  V(FixedArray, serialized_templates, SerializedTemplates)                     \
+  /* Configured values */                                                      \
+  V(JSObject, message_listeners, MessageListeners)                             \
+  V(Code, js_entry_code, JsEntryCode)                                          \
+  V(Code, js_construct_entry_code, JsConstructEntryCode)                       \
+  /* Oddball maps */                                                           \
   V(Map, undefined_map, UndefinedMap)                                          \
   V(Map, the_hole_map, TheHoleMap)                                             \
   V(Map, null_map, NullMap)                                                    \
@@ -153,59 +212,21 @@
   V(Map, exception_map, ExceptionMap)                                          \
   V(Map, termination_exception_map, TerminationExceptionMap)                   \
   V(Map, optimized_out_map, OptimizedOutMap)                                   \
-  V(Map, stale_register_map, StaleRegisterMap)                                 \
-  V(Map, message_object_map, JSMessageObjectMap)                               \
-  V(Map, foreign_map, ForeignMap)                                              \
-  V(Map, neander_map, NeanderMap)                                              \
-  V(Map, external_map, ExternalMap)                                            \
-  V(HeapNumber, nan_value, NanValue)                                           \
-  V(HeapNumber, infinity_value, InfinityValue)                                 \
-  V(HeapNumber, minus_zero_value, MinusZeroValue)                              \
-  V(HeapNumber, minus_infinity_value, MinusInfinityValue)                      \
-  V(JSObject, message_listeners, MessageListeners)                             \
-  V(UnseededNumberDictionary, code_stubs, CodeStubs)                           \
-  V(Code, js_entry_code, JsEntryCode)                                          \
-  V(Code, js_construct_entry_code, JsConstructEntryCode)                       \
-  V(FixedArray, natives_source_cache, NativesSourceCache)                      \
-  V(FixedArray, experimental_natives_source_cache,                             \
-    ExperimentalNativesSourceCache)                                            \
-  V(FixedArray, extra_natives_source_cache, ExtraNativesSourceCache)           \
-  V(FixedArray, experimental_extra_natives_source_cache,                       \
-    ExperimentalExtraNativesSourceCache)                                       \
-  V(Script, empty_script, EmptyScript)                                         \
-  V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames)          \
-  V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary)    \
-  V(Cell, undefined_cell, UndefinedCell)                                       \
-  V(Object, symbol_registry, SymbolRegistry)                                   \
-  V(Object, script_list, ScriptList)                                           \
-  V(SeededNumberDictionary, empty_slow_element_dictionary,                     \
-    EmptySlowElementDictionary)                                                \
-  V(FixedArray, materialized_objects, MaterializedObjects)                     \
-  V(FixedArray, microtask_queue, MicrotaskQueue)                               \
-  V(TypeFeedbackVector, dummy_vector, DummyVector)                             \
-  V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap)           \
-  V(FixedArray, detached_contexts, DetachedContexts)                           \
-  V(ArrayList, retained_maps, RetainedMaps)                                    \
-  V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable)           \
-  V(PropertyCell, array_protector, ArrayProtector)                             \
-  V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector)         \
-  V(PropertyCell, empty_property_cell, EmptyPropertyCell)                      \
-  V(Object, weak_stack_trace_list, WeakStackTraceList)                         \
-  V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos)       \
-  V(Map, bytecode_array_map, BytecodeArrayMap)                                 \
-  V(WeakCell, empty_weak_cell, EmptyWeakCell)                                  \
-  V(PropertyCell, has_instance_protector, HasInstanceProtector)                \
-  V(Cell, species_protector, SpeciesProtector)
+  V(Map, stale_register_map, StaleRegisterMap)
 
 // Entries in this list are limited to Smis and are not visited during GC.
-#define SMI_ROOT_LIST(V)                                                   \
-  V(Smi, stack_limit, StackLimit)                                          \
-  V(Smi, real_stack_limit, RealStackLimit)                                 \
-  V(Smi, last_script_id, LastScriptId)                                     \
-  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
-  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)       \
-  V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset)             \
-  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)             \
+#define SMI_ROOT_LIST(V)                                                       \
+  V(Smi, stack_limit, StackLimit)                                              \
+  V(Smi, real_stack_limit, RealStackLimit)                                     \
+  V(Smi, last_script_id, LastScriptId)                                         \
+  V(Smi, hash_seed, HashSeed)                                                  \
+  /* To distinguish the function templates, so that we can find them in the */ \
+  /* function cache of the native context. */                                  \
+  V(Smi, next_template_serial_number, NextTemplateSerialNumber)                \
+  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset)     \
+  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)           \
+  V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset)                 \
+  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)                 \
   V(Smi, interpreter_entry_return_pc_offset, InterpreterEntryReturnPCOffset)
 
 #define ROOT_LIST(V)  \
@@ -301,6 +322,8 @@
 class ScavengeJob;
 class WeakObjectRetainer;
 
+enum PromotionMode { PROMOTE_MARKED, DEFAULT_PROMOTION };
+
 typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
 
 // A queue of objects promoted during scavenge. Each object is accompanied
@@ -603,6 +626,12 @@
   // stored on the map to facilitate fast dispatch for {StaticVisitorBase}.
   static int GetStaticVisitorIdForMap(Map* map);
 
+  // We cannot avoid stale handles to left-trimmed objects, but can only make
+  // sure all handles still needed are updated. Filter out a stale pointer
+  // and clear the slot to allow post processing of handles (needed because
+  // the sweeper might actually free the underlying page).
+  inline bool PurgeLeftTrimmedObject(Object** object);
+
   // Notifies the heap that is ok to start marking or other activities that
   // should not happen during deserialization.
   void NotifyDeserializationComplete();
@@ -774,8 +803,11 @@
 
   // An object should be promoted if the object has survived a
   // scavenge operation.
+  template <PromotionMode promotion_mode>
   inline bool ShouldBePromoted(Address old_address, int object_size);
 
+  inline PromotionMode CurrentPromotionMode();
+
   void ClearNormalizedMapCaches();
 
   void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
@@ -795,6 +827,9 @@
   inline void SetGetterStubDeoptPCOffset(int pc_offset);
   inline void SetSetterStubDeoptPCOffset(int pc_offset);
   inline void SetInterpreterEntryReturnPCOffset(int pc_offset);
+  inline int GetNextTemplateSerialNumber();
+
+  inline void SetSerializedTemplates(FixedArray* templates);
 
   // For post mortem debugging.
   void RememberUnmappedPage(Address page, bool compacted);
@@ -807,12 +842,16 @@
     global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
   }
 
-  int64_t amount_of_external_allocated_memory() {
-    return amount_of_external_allocated_memory_;
+  int64_t external_memory() { return external_memory_; }
+  void update_external_memory(int64_t delta) { external_memory_ += delta; }
+
+  void update_external_memory_concurrently_freed(intptr_t freed) {
+    external_memory_concurrently_freed_.Increment(freed);
   }
 
-  void update_amount_of_external_allocated_memory(int64_t delta) {
-    amount_of_external_allocated_memory_ += delta;
+  void account_external_memory_concurrently_freed() {
+    external_memory_ -= external_memory_concurrently_freed_.Value();
+    external_memory_concurrently_freed_.SetValue(0);
   }
 
   void DeoptMarkedAllocationSites();
@@ -826,6 +865,8 @@
 
   DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj);
 
+  void CompactWeakFixedArrays();
+
   void AddRetainedMap(Handle<Map> map);
 
   // This event is triggered after successful allocation of a new object made
@@ -1168,6 +1209,13 @@
                          const char** object_sub_type);
 
   // ===========================================================================
+  // Code statistics. ==========================================================
+  // ===========================================================================
+
+  // Collect code (Code and BytecodeArray objects) statistics.
+  void CollectCodeStatistics();
+
+  // ===========================================================================
   // GC statistics. ============================================================
   // ===========================================================================
 
@@ -1342,10 +1390,6 @@
   void RegisterNewArrayBuffer(JSArrayBuffer* buffer);
   void UnregisterArrayBuffer(JSArrayBuffer* buffer);
 
-  inline ArrayBufferTracker* array_buffer_tracker() {
-    return array_buffer_tracker_;
-  }
-
   // ===========================================================================
   // Allocation site tracking. =================================================
   // ===========================================================================
@@ -1357,7 +1401,7 @@
   // value) is cached on the local pretenuring feedback.
   template <UpdateAllocationSiteMode mode>
   inline void UpdateAllocationSite(HeapObject* object,
-                                   HashMap* pretenuring_feedback);
+                                   base::HashMap* pretenuring_feedback);
 
   // Removes an entry from the global pretenuring storage.
   inline void RemoveAllocationSitePretenuringFeedback(AllocationSite* site);
@@ -1366,7 +1410,7 @@
   // method needs to be called after evacuation, as allocation sites may be
   // evacuated and this method resolves forward pointers accordingly.
   void MergeAllocationSitePretenuringFeedback(
-      const HashMap& local_pretenuring_feedback);
+      const base::HashMap& local_pretenuring_feedback);
 
 // =============================================================================
 
@@ -1683,7 +1727,8 @@
   // Performs a minor collection in new generation.
   void Scavenge();
 
-  Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
+  Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front,
+                     PromotionMode promotion_mode);
 
   void UpdateNewSpaceReferencesInExternalStringTable(
       ExternalStringTableUpdaterCallback updater_func);
@@ -1808,11 +1853,6 @@
   AllocateBytecodeArray(int length, const byte* raw_bytecodes, int frame_size,
                         int parameter_count, FixedArray* constant_pool);
 
-  // Copy the code and scope info part of the code object, but insert
-  // the provided data as the relocation information.
-  MUST_USE_RESULT AllocationResult CopyCode(Code* code,
-                                            Vector<byte> reloc_info);
-
   MUST_USE_RESULT AllocationResult CopyCode(Code* code);
 
   MUST_USE_RESULT AllocationResult
@@ -1972,12 +2012,17 @@
 
   void set_force_oom(bool value) { force_oom_ = value; }
 
-  // The amount of external memory registered through the API kept alive
-  // by global handles
-  int64_t amount_of_external_allocated_memory_;
+  // The amount of external memory registered through the API.
+  int64_t external_memory_;
 
-  // Caches the amount of external memory registered at the last global gc.
-  int64_t amount_of_external_allocated_memory_at_last_global_gc_;
+  // The limit when to trigger memory pressure from the API.
+  int64_t external_memory_limit_;
+
+  // Caches the amount of external memory registered at the last MC.
+  int64_t external_memory_at_last_mark_compact_;
+
+  // The amount of memory that has been freed concurrently.
+  base::AtomicNumber<intptr_t> external_memory_concurrently_freed_;
 
   // This can be calculated directly from a pointer to the heap; however, it is
   // more expedient to get at the isolate directly from within Heap methods.
@@ -2184,7 +2229,7 @@
   // storage is only alive temporary during a GC. The invariant is that all
   // pointers in this map are already fixed, i.e., they do not point to
   // forwarding pointers.
-  HashMap* global_pretenuring_feedback_;
+  base::HashMap* global_pretenuring_feedback_;
 
   char trace_ring_buffer_[kTraceRingBufferSize];
   // If it's not full then the data is from 0 to ring_buffer_end_.  If it's
@@ -2217,8 +2262,6 @@
 
   StrongRootsList* strong_roots_list_;
 
-  ArrayBufferTracker* array_buffer_tracker_;
-
   // The depth of HeapIterator nestings.
   int heap_iterator_depth_;
 
@@ -2236,7 +2279,7 @@
   friend class MarkCompactCollector;
   friend class MarkCompactMarkingVisitor;
   friend class NewSpace;
-  friend class ObjectStatsVisitor;
+  friend class ObjectStatsCollector;
   friend class Page;
   friend class Scavenger;
   friend class StoreBuffer;
diff --git a/src/heap/incremental-marking.cc b/src/heap/incremental-marking.cc
index c250b90..f578d43 100644
--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -10,8 +10,9 @@
 #include "src/heap/gc-idle-time-handler.h"
 #include "src/heap/gc-tracer.h"
 #include "src/heap/mark-compact-inl.h"
-#include "src/heap/objects-visiting.h"
+#include "src/heap/object-stats.h"
 #include "src/heap/objects-visiting-inl.h"
+#include "src/heap/objects-visiting.h"
 #include "src/tracing/trace-event.h"
 #include "src/v8.h"
 
@@ -175,6 +176,9 @@
     table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
     table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
     table_.Register(kVisitJSRegExp, &VisitJSRegExp);
+    if (FLAG_track_gc_object_stats) {
+      IncrementalMarkingObjectStatsVisitor::Initialize(&table_);
+    }
   }
 
   static const int kProgressBarScanningChunk = 32 * 1024;
@@ -231,7 +235,7 @@
     // Note that GC can happen when the context is not fully initialized,
     // so the cache can be undefined.
     Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
-    if (!cache->IsUndefined()) {
+    if (!cache->IsUndefined(map->GetIsolate())) {
       MarkObjectGreyDoNotEnqueue(cache);
     }
     VisitNativeContext(map, context);
@@ -341,9 +345,7 @@
 
 void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
     PagedSpace* space) {
-  PageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     SetOldSpacePageFlags(p, false, false);
   }
 }
@@ -351,9 +353,7 @@
 
 void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
     NewSpace* space) {
-  NewSpacePageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     SetNewSpacePageFlags(p, false);
   }
 }
@@ -365,27 +365,21 @@
   DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
   DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
 
-  LargePage* lop = heap_->lo_space()->first_page();
-  while (LargePage::IsValid(lop)) {
+  for (LargePage* lop : *heap_->lo_space()) {
     SetOldSpacePageFlags(lop, false, false);
-    lop = lop->next_page();
   }
 }
 
 
 void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
-  PageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     SetOldSpacePageFlags(p, true, is_compacting_);
   }
 }
 
 
 void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
-  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     SetNewSpacePageFlags(p, true);
   }
 }
@@ -397,10 +391,8 @@
   ActivateIncrementalWriteBarrier(heap_->code_space());
   ActivateIncrementalWriteBarrier(heap_->new_space());
 
-  LargePage* lop = heap_->lo_space()->first_page();
-  while (LargePage::IsValid(lop)) {
+  for (LargePage* lop : *heap_->lo_space()) {
     SetOldSpacePageFlags(lop, true, is_compacting_);
-    lop = lop->next_page();
   }
 }
 
@@ -469,9 +461,10 @@
   UnseededNumberDictionary* stubs = heap->code_stubs();
 
   int capacity = stubs->Capacity();
+  Isolate* isolate = heap->isolate();
   for (int i = 0; i < capacity; i++) {
     Object* k = stubs->KeyAt(i);
-    if (stubs->IsKey(k)) {
+    if (stubs->IsKey(isolate, k)) {
       uint32_t key = NumberToUint32(k);
 
       if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
@@ -537,6 +530,10 @@
 
   state_ = MARKING;
 
+  if (heap_->UsingEmbedderHeapTracer()) {
+    heap_->mark_compact_collector()->embedder_heap_tracer()->TracePrologue();
+  }
+
   RecordWriteStub::Mode mode = is_compacting_
                                    ? RecordWriteStub::INCREMENTAL_COMPACTION
                                    : RecordWriteStub::INCREMENTAL;
@@ -930,12 +927,12 @@
   }
 
   Object* context = heap_->native_contexts_list();
-  while (!context->IsUndefined()) {
+  while (!context->IsUndefined(heap_->isolate())) {
     // GC can happen when the context is not fully initialized,
     // so the cache can be undefined.
     HeapObject* cache = HeapObject::cast(
         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
-    if (!cache->IsUndefined()) {
+    if (!cache->IsUndefined(heap_->isolate())) {
       MarkBit mark_bit = Marking::MarkBitFrom(cache);
       if (Marking::IsGrey(mark_bit)) {
         Marking::GreyToBlack(mark_bit);
diff --git a/src/heap/incremental-marking.h b/src/heap/incremental-marking.h
index 9c5a3b5..5f5a1de 100644
--- a/src/heap/incremental-marking.h
+++ b/src/heap/incremental-marking.h
@@ -215,6 +215,8 @@
 
   bool black_allocation() { return black_allocation_; }
 
+  void StartBlackAllocationForTesting() { StartBlackAllocation(); }
+
  private:
   class Observer : public AllocationObserver {
    public:
diff --git a/src/heap/mark-compact-inl.h b/src/heap/mark-compact-inl.h
index 455f443..8ecdd62 100644
--- a/src/heap/mark-compact-inl.h
+++ b/src/heap/mark-compact-inl.h
@@ -79,7 +79,7 @@
 
 void CodeFlusher::AddCandidate(JSFunction* function) {
   DCHECK(function->code() == function->shared()->code());
-  if (function->next_function_link()->IsUndefined()) {
+  if (function->next_function_link()->IsUndefined(isolate_)) {
     SetNextCandidate(function, jsfunction_candidates_head_);
     jsfunction_candidates_head_ = function;
   }
@@ -105,7 +105,7 @@
 
 
 void CodeFlusher::ClearNextCandidate(JSFunction* candidate, Object* undefined) {
-  DCHECK(undefined->IsUndefined());
+  DCHECK(undefined->IsUndefined(candidate->GetIsolate()));
   candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
 }
 
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index b2ae93d..f9a55df 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -25,7 +25,6 @@
 #include "src/heap/spaces-inl.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/utils-inl.h"
 #include "src/v8.h"
 
@@ -132,13 +131,14 @@
 
 static void VerifyMarking(NewSpace* space) {
   Address end = space->top();
-  NewSpacePageIterator it(space->bottom(), end);
   // The bottom position is at the start of its page. Allows us to use
   // page->area_start() as start of range on all pages.
   CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
-  while (it.has_next()) {
-    Page* page = it.next();
-    Address limit = it.has_next() ? page->area_end() : end;
+
+  NewSpacePageRange range(space->bottom(), end);
+  for (auto it = range.begin(); it != range.end();) {
+    Page* page = *(it++);
+    Address limit = it != range.end() ? page->area_end() : end;
     CHECK(limit == end || !page->Contains(end));
     VerifyMarking(space->heap(), page->area_start(), limit);
   }
@@ -146,10 +146,7 @@
 
 
 static void VerifyMarking(PagedSpace* space) {
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     if (p->IsFlagSet(Page::BLACK_PAGE)) {
       VerifyMarkingBlackPage(space->heap(), p);
     } else {
@@ -205,13 +202,12 @@
 
 
 static void VerifyEvacuation(NewSpace* space) {
-  NewSpacePageIterator it(space->bottom(), space->top());
   VerifyEvacuationVisitor visitor;
-
-  while (it.has_next()) {
-    Page* page = it.next();
+  NewSpacePageRange range(space->bottom(), space->top());
+  for (auto it = range.begin(); it != range.end();) {
+    Page* page = *(it++);
     Address current = page->area_start();
-    Address limit = it.has_next() ? page->area_end() : space->top();
+    Address limit = it != range.end() ? page->area_end() : space->top();
     CHECK(limit == space->top() || !page->Contains(space->top()));
     while (current < limit) {
       HeapObject* object = HeapObject::FromAddress(current);
@@ -226,10 +222,7 @@
   if (FLAG_use_allocation_folding && (space == heap->old_space())) {
     return;
   }
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     if (p->IsEvacuationCandidate()) continue;
     VerifyEvacuation(p);
   }
@@ -361,10 +354,7 @@
 
 #ifdef VERIFY_HEAP
 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     CHECK(p->markbits()->IsClean());
     CHECK_EQ(0, p->LiveBytes());
   }
@@ -372,10 +362,7 @@
 
 
 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
-  NewSpacePageIterator it(space->bottom(), space->top());
-
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : NewSpacePageRange(space->bottom(), space->top())) {
     CHECK(p->markbits()->IsClean());
     CHECK_EQ(0, p->LiveBytes());
   }
@@ -420,10 +407,7 @@
 
 
 static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     Bitmap::Clear(p);
     if (p->IsFlagSet(Page::BLACK_PAGE)) {
       p->ClearFlag(Page::BLACK_PAGE);
@@ -433,10 +417,8 @@
 
 
 static void ClearMarkbitsInNewSpace(NewSpace* space) {
-  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
-
-  while (it.has_next()) {
-    Bitmap::Clear(it.next());
+  for (Page* page : *space) {
+    Bitmap::Clear(page);
   }
 }
 
@@ -472,13 +454,13 @@
  private:
   // v8::Task overrides.
   void Run() override {
-    DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE);
+    DCHECK_GE(space_to_start_, FIRST_SPACE);
     DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
-    const int offset = space_to_start_ - FIRST_PAGED_SPACE;
-    const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
+    const int offset = space_to_start_ - FIRST_SPACE;
+    const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
     for (int i = 0; i < num_spaces; i++) {
-      const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces);
-      DCHECK_GE(space_id, FIRST_PAGED_SPACE);
+      const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
+      DCHECK_GE(space_id, FIRST_SPACE);
       DCHECK_LE(space_id, LAST_PAGED_SPACE);
       sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0);
     }
@@ -508,7 +490,7 @@
 
 void MarkCompactCollector::Sweeper::StartSweepingHelper(
     AllocationSpace space_to_start) {
-  num_sweeping_tasks_++;
+  num_sweeping_tasks_.Increment(1);
   V8::GetCurrentPlatform()->CallOnBackgroundThread(
       new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start),
       v8::Platform::kShortRunningTask);
@@ -516,9 +498,8 @@
 
 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
     Page* page) {
-  PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
   if (!page->SweepingDone()) {
-    ParallelSweepPage(page, owner);
+    ParallelSweepPage(page, page->owner()->identity());
     if (!page->SweepingDone()) {
       // We were not able to sweep that page, i.e., a concurrent
       // sweeper thread currently owns this page. Wait for the sweeper
@@ -555,18 +536,31 @@
   }
 
   if (FLAG_concurrent_sweeping) {
-    while (num_sweeping_tasks_ > 0) {
+    while (num_sweeping_tasks_.Value() > 0) {
       pending_sweeper_tasks_semaphore_.Wait();
-      num_sweeping_tasks_--;
+      num_sweeping_tasks_.Increment(-1);
     }
   }
 
-  ForAllSweepingSpaces(
-      [this](AllocationSpace space) { DCHECK(sweeping_list_[space].empty()); });
+  ForAllSweepingSpaces([this](AllocationSpace space) {
+    if (space == NEW_SPACE) {
+      swept_list_[NEW_SPACE].Clear();
+    }
+    DCHECK(sweeping_list_[space].empty());
+  });
   late_pages_ = false;
   sweeping_in_progress_ = false;
 }
 
+void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
+  if (!sweeping_in_progress_) return;
+  if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
+    for (Page* p : *heap_->new_space()) {
+      SweepOrWaitUntilSweepingCompleted(p);
+    }
+  }
+}
+
 void MarkCompactCollector::EnsureSweepingCompleted() {
   if (!sweeper().sweeping_in_progress()) return;
 
@@ -583,12 +577,11 @@
 }
 
 bool MarkCompactCollector::Sweeper::IsSweepingCompleted() {
-  if (!pending_sweeper_tasks_semaphore_.WaitFor(
-          base::TimeDelta::FromSeconds(0))) {
-    return false;
+  while (pending_sweeper_tasks_semaphore_.WaitFor(
+      base::TimeDelta::FromSeconds(0))) {
+    num_sweeping_tasks_.Increment(-1);
   }
-  pending_sweeper_tasks_semaphore_.Signal();
-  return true;
+  return num_sweeping_tasks_.Value() == 0;
 }
 
 void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
@@ -703,9 +696,7 @@
   std::vector<LiveBytesPagePair> pages;
   pages.reserve(number_of_pages);
 
-  PageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     if (p->NeverEvacuate()) continue;
     if (p->IsFlagSet(Page::BLACK_PAGE)) continue;
     // Invariant: Evacuation candidates are just created when marking is
@@ -858,9 +849,22 @@
     AbortWeakCells();
     AbortTransitionArrays();
     AbortCompaction();
+    if (heap_->UsingEmbedderHeapTracer()) {
+      heap_->mark_compact_collector()->embedder_heap_tracer()->AbortTracing();
+    }
     was_marked_incrementally_ = false;
   }
 
+  if (!was_marked_incrementally_) {
+    if (heap_->UsingEmbedderHeapTracer()) {
+      heap_->mark_compact_collector()->embedder_heap_tracer()->TracePrologue();
+    }
+  }
+
+  if (UsingEmbedderHeapTracer()) {
+    embedder_heap_tracer()->EnterFinalPause();
+  }
+
   // Don't start compaction if we are in the middle of incremental
   // marking cycle. We did not collect any slots.
   if (!FLAG_never_compact && !was_marked_incrementally_) {
@@ -872,6 +876,7 @@
        space = spaces.next()) {
     space->PrepareForMarkCompact();
   }
+  heap()->account_external_memory_concurrently_freed();
 
 #ifdef VERIFY_HEAP
   if (!was_marked_incrementally_ && FLAG_verify_heap) {
@@ -1074,7 +1079,7 @@
 
 
 void CodeFlusher::EvictCandidate(JSFunction* function) {
-  DCHECK(!function->next_function_link()->IsUndefined());
+  DCHECK(!function->next_function_link()->IsUndefined(isolate_));
   Object* undefined = isolate_->heap()->undefined_value();
 
   // Make sure previous flushing decisions are revisited.
@@ -1299,7 +1304,7 @@
   table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
 
   if (FLAG_track_gc_object_stats) {
-    ObjectStatsVisitor::Initialize(&table_);
+    MarkCompactObjectStatsVisitor::Initialize(&table_);
   }
 }
 
@@ -1408,6 +1413,8 @@
 
     HeapObject* object = HeapObject::cast(*p);
 
+    if (collector_->heap()->PurgeLeftTrimmedObject(p)) return;
+
     MarkBit mark_bit = Marking::MarkBitFrom(object);
     if (Marking::IsBlackOrGrey(mark_bit)) return;
 
@@ -1535,6 +1542,9 @@
 
 class RecordMigratedSlotVisitor final : public ObjectVisitor {
  public:
+  explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
+      : collector_(collector) {}
+
   inline void VisitPointer(Object** p) final {
     RecordMigratedSlot(*p, reinterpret_cast<Address>(p));
   }
@@ -1550,10 +1560,59 @@
     Address code_entry = Memory::Address_at(code_entry_slot);
     if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
       RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot),
-                                             CODE_ENTRY_SLOT, code_entry_slot);
+                                             nullptr, CODE_ENTRY_SLOT,
+                                             code_entry_slot);
     }
   }
 
+  inline void VisitCodeTarget(RelocInfo* rinfo) final {
+    DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    Code* host = rinfo->host();
+    collector_->RecordRelocSlot(host, rinfo, target);
+  }
+
+  inline void VisitDebugTarget(RelocInfo* rinfo) final {
+    DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+           rinfo->IsPatchedDebugBreakSlotSequence());
+    Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
+    Code* host = rinfo->host();
+    collector_->RecordRelocSlot(host, rinfo, target);
+  }
+
+  inline void VisitEmbeddedPointer(RelocInfo* rinfo) final {
+    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+    HeapObject* object = HeapObject::cast(rinfo->target_object());
+    Code* host = rinfo->host();
+    collector_->RecordRelocSlot(host, rinfo, object);
+  }
+
+  inline void VisitCell(RelocInfo* rinfo) final {
+    DCHECK(rinfo->rmode() == RelocInfo::CELL);
+    Cell* cell = rinfo->target_cell();
+    Code* host = rinfo->host();
+    collector_->RecordRelocSlot(host, rinfo, cell);
+  }
+
+  // Entries that will never move.
+  inline void VisitCodeAgeSequence(RelocInfo* rinfo) final {
+    DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
+    Code* stub = rinfo->code_age_stub();
+    USE(stub);
+    DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate());
+  }
+
+  // Entries that are skipped for recording.
+  inline void VisitExternalReference(RelocInfo* rinfo) final {}
+  inline void VisitExternalReference(Address* p) final {}
+  inline void VisitRuntimeEntry(RelocInfo* rinfo) final {}
+  inline void VisitExternalOneByteString(
+      v8::String::ExternalOneByteStringResource** resource) final {}
+  inline void VisitExternalTwoByteString(
+      v8::String::ExternalStringResource** resource) final {}
+  inline void VisitInternalReference(RelocInfo* rinfo) final {}
+  inline void VisitEmbedderReference(Object** p, uint16_t class_id) final {}
+
  private:
   inline void RecordMigratedSlot(Object* value, Address slot) {
     if (value->IsHeapObject()) {
@@ -1565,6 +1624,8 @@
       }
     }
   }
+
+  MarkCompactCollector* collector_;
 };
 
 class MarkCompactCollector::HeapObjectVisitor {
@@ -1582,12 +1643,15 @@
       : heap_(heap),
         compaction_spaces_(compaction_spaces),
         profiling_(
-            heap->isolate()->cpu_profiler()->is_profiling() ||
+            heap->isolate()->is_profiling() ||
             heap->isolate()->logger()->is_logging_code_events() ||
             heap->isolate()->heap_profiler()->is_tracking_object_moves()) {}
 
   inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
                                 HeapObject** target_object) {
+#ifdef VERIFY_HEAP
+    if (AbortCompactionForTesting(object)) return false;
+#endif  // VERIFY_HEAP
     int size = object->Size();
     AllocationAlignment alignment = object->RequiredAlignment();
     AllocationResult allocation = target_space->AllocateRaw(size, alignment);
@@ -1622,7 +1686,7 @@
         PROFILE(heap_->isolate(),
                 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
       }
-      RecordMigratedSlotVisitor visitor;
+      RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
       dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
     } else if (dest == CODE_SPACE) {
       DCHECK_CODEOBJECT_SIZE(size, heap_->code_space());
@@ -1631,9 +1695,9 @@
                 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
       }
       heap_->CopyBlock(dst_addr, src_addr, size);
-      RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(dst_addr),
-                                             RELOCATED_CODE_OBJECT, dst_addr);
       Code::cast(dst)->Relocate(dst_addr - src_addr);
+      RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
+      dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
     } else {
       DCHECK_OBJECT_SIZE(size);
       DCHECK(dest == NEW_SPACE);
@@ -1645,6 +1709,26 @@
     Memory::Address_at(src_addr) = dst_addr;
   }
 
+#ifdef VERIFY_HEAP
+  bool AbortCompactionForTesting(HeapObject* object) {
+    if (FLAG_stress_compaction) {
+      const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
+                             Page::kPageAlignmentMask & ~kPointerAlignmentMask;
+      if ((reinterpret_cast<uintptr_t>(object->address()) &
+           Page::kPageAlignmentMask) == mask) {
+        Page* page = Page::FromAddress(object->address());
+        if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
+          page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
+        } else {
+          page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+#endif  // VERIFY_HEAP
+
   Heap* heap_;
   CompactionSpaceCollection* compaction_spaces_;
   bool profiling_;
@@ -1658,7 +1742,7 @@
 
   explicit EvacuateNewSpaceVisitor(Heap* heap,
                                    CompactionSpaceCollection* compaction_spaces,
-                                   HashMap* local_pretenuring_feedback)
+                                   base::HashMap* local_pretenuring_feedback)
       : EvacuateVisitorBase(heap, compaction_spaces),
         buffer_(LocalAllocationBuffer::InvalidBuffer()),
         space_to_allocate_(NEW_SPACE),
@@ -1671,23 +1755,15 @@
                                                local_pretenuring_feedback_);
     int size = object->Size();
     HeapObject* target_object = nullptr;
-    if (heap_->ShouldBePromoted(object->address(), size) &&
+    if (heap_->ShouldBePromoted<DEFAULT_PROMOTION>(object->address(), size) &&
         TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
                           &target_object)) {
-      // If we end up needing more special cases, we should factor this out.
-      if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
-        heap_->array_buffer_tracker()->Promote(
-            JSArrayBuffer::cast(target_object));
-      }
       promoted_size_ += size;
       return true;
     }
     HeapObject* target = nullptr;
     AllocationSpace space = AllocateTargetObject(object, &target);
     MigrateObject(HeapObject::cast(target), object, size, space);
-    if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
-      heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
-    }
     semispace_copied_size_ += size;
     return true;
   }
@@ -1706,6 +1782,7 @@
     const int size = old_object->Size();
     AllocationAlignment alignment = old_object->RequiredAlignment();
     AllocationResult allocation;
+    AllocationSpace space_allocated_in = space_to_allocate_;
     if (space_to_allocate_ == NEW_SPACE) {
       if (size > kMaxLabObjectSize) {
         allocation =
@@ -1716,11 +1793,12 @@
     }
     if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
       allocation = AllocateInOldSpace(size, alignment);
+      space_allocated_in = OLD_SPACE;
     }
     bool ok = allocation.To(target_object);
     DCHECK(ok);
     USE(ok);
-    return space_to_allocate_;
+    return space_allocated_in;
   }
 
   inline bool NewLocalAllocationBuffer() {
@@ -1795,36 +1873,44 @@
   AllocationSpace space_to_allocate_;
   intptr_t promoted_size_;
   intptr_t semispace_copied_size_;
-  HashMap* local_pretenuring_feedback_;
+  base::HashMap* local_pretenuring_feedback_;
 };
 
 class MarkCompactCollector::EvacuateNewSpacePageVisitor final
     : public MarkCompactCollector::HeapObjectVisitor {
  public:
-  EvacuateNewSpacePageVisitor() : promoted_size_(0) {}
+  explicit EvacuateNewSpacePageVisitor(Heap* heap)
+      : heap_(heap), promoted_size_(0), semispace_copied_size_(0) {}
 
-  static void TryMoveToOldSpace(Page* page, PagedSpace* owner) {
-    if (page->heap()->new_space()->ReplaceWithEmptyPage(page)) {
-      Page* new_page = Page::ConvertNewToOld(page, owner);
-      new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
-    }
+  static void MoveToOldSpace(Page* page, PagedSpace* owner) {
+    page->Unlink();
+    Page* new_page = Page::ConvertNewToOld(page, owner);
+    new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+  }
+
+  static void MoveToToSpace(Page* page) {
+    page->heap()->new_space()->MovePageFromSpaceToSpace(page);
+    page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
   }
 
   inline bool Visit(HeapObject* object) {
-    if (V8_UNLIKELY(object->IsJSArrayBuffer())) {
-      object->GetHeap()->array_buffer_tracker()->Promote(
-          JSArrayBuffer::cast(object));
-    }
-    RecordMigratedSlotVisitor visitor;
+    RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
     object->IterateBodyFast(&visitor);
     promoted_size_ += object->Size();
     return true;
   }
 
   intptr_t promoted_size() { return promoted_size_; }
+  intptr_t semispace_copied_size() { return semispace_copied_size_; }
+
+  void account_semispace_copied(intptr_t copied) {
+    semispace_copied_size_ += copied;
+  }
 
  private:
+  Heap* heap_;
   intptr_t promoted_size_;
+  intptr_t semispace_copied_size_;
 };
 
 class MarkCompactCollector::EvacuateOldSpaceVisitor final
@@ -1849,30 +1935,20 @@
 class MarkCompactCollector::EvacuateRecordOnlyVisitor final
     : public MarkCompactCollector::HeapObjectVisitor {
  public:
-  explicit EvacuateRecordOnlyVisitor(AllocationSpace space) : space_(space) {}
+  explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
 
   inline bool Visit(HeapObject* object) {
-    if (space_ == OLD_SPACE) {
-      RecordMigratedSlotVisitor visitor;
-      object->IterateBody(&visitor);
-    } else {
-      DCHECK_EQ(space_, CODE_SPACE);
-      // Add a typed slot for the whole code object.
-      RememberedSet<OLD_TO_OLD>::InsertTyped(
-          Page::FromAddress(object->address()), RELOCATED_CODE_OBJECT,
-          object->address());
-    }
+    RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
+    object->IterateBody(&visitor);
     return true;
   }
 
  private:
-  AllocationSpace space_;
+  Heap* heap_;
 };
 
 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
-  PageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     if (!p->IsFlagSet(Page::BLACK_PAGE)) {
       DiscoverGreyObjectsOnPage(p);
     }
@@ -1883,9 +1959,7 @@
 
 void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
   NewSpace* space = heap()->new_space();
-  NewSpacePageIterator it(space->bottom(), space->top());
-  while (it.has_next()) {
-    Page* page = it.next();
+  for (Page* page : NewSpacePageRange(space->bottom(), space->top())) {
     DiscoverGreyObjectsOnPage(page);
     if (marking_deque()->IsFull()) return;
   }
@@ -2052,8 +2126,10 @@
   bool work_to_do = true;
   while (work_to_do) {
     if (UsingEmbedderHeapTracer()) {
-      embedder_heap_tracer()->TraceWrappersFrom(wrappers_to_trace_);
-      wrappers_to_trace_.clear();
+      RegisterWrappersWithEmbedderHeapTracer();
+      embedder_heap_tracer()->AdvanceTracing(
+          0, EmbedderHeapTracer::AdvanceTracingActions(
+                 EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
     }
     if (!only_process_harmony_weak_collections) {
       isolate()->global_handles()->IterateObjectGroups(
@@ -2170,6 +2246,15 @@
   embedder_heap_tracer_ = tracer;
 }
 
+void MarkCompactCollector::RegisterWrappersWithEmbedderHeapTracer() {
+  DCHECK(UsingEmbedderHeapTracer());
+  if (wrappers_to_trace_.empty()) {
+    return;
+  }
+  embedder_heap_tracer()->RegisterV8References(wrappers_to_trace_);
+  wrappers_to_trace_.clear();
+}
+
 void MarkCompactCollector::TracePossibleWrapper(JSObject* js_object) {
   DCHECK(js_object->WasConstructedFromApiFunction());
   if (js_object->GetInternalFieldCount() >= 2 &&
@@ -2177,7 +2262,7 @@
       js_object->GetInternalField(0) != heap_->undefined_value() &&
       js_object->GetInternalField(1) != heap_->undefined_value()) {
     DCHECK(reinterpret_cast<intptr_t>(js_object->GetInternalField(0)) % 2 == 0);
-    wrappers_to_trace().push_back(std::pair<void*, void*>(
+    wrappers_to_trace_.push_back(std::pair<void*, void*>(
         reinterpret_cast<void*>(js_object->GetInternalField(0)),
         reinterpret_cast<void*>(js_object->GetInternalField(1))));
   }
@@ -2210,6 +2295,10 @@
     } else {
       // Abort any pending incremental activities e.g. incremental sweeping.
       incremental_marking->Stop();
+      if (FLAG_track_gc_object_stats) {
+        // Clear object stats collected during incremental marking.
+        heap()->object_stats_->ClearObjectStats();
+      }
       if (marking_deque_.in_use()) {
         marking_deque_.Uninitialize(true);
       }
@@ -2246,10 +2335,6 @@
     {
       TRACE_GC(heap()->tracer(),
                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
-      if (UsingEmbedderHeapTracer()) {
-        embedder_heap_tracer()->TracePrologue();
-        ProcessMarkingDeque();
-      }
       ProcessEphemeralMarking(&root_visitor, false);
     }
 
@@ -2378,7 +2463,7 @@
   for (uint32_t i = 0; i < capacity; i++) {
     uint32_t key_index = table->EntryToIndex(i);
     Object* key = table->get(key_index);
-    if (!table->IsKey(key)) continue;
+    if (!table->IsKey(isolate, key)) continue;
     uint32_t value_index = table->EntryToValueIndex(i);
     Object* value = table->get(value_index);
     DCHECK(key->IsWeakCell());
@@ -2750,128 +2835,20 @@
         slot_type = OBJECT_SLOT;
       }
     }
-    RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, slot_type, addr);
+    RememberedSet<OLD_TO_OLD>::InsertTyped(
+        source_page, reinterpret_cast<Address>(host), slot_type, addr);
   }
 }
 
-static inline void UpdateTypedSlot(Isolate* isolate, ObjectVisitor* v,
-                                   SlotType slot_type, Address addr) {
-  switch (slot_type) {
-    case CODE_TARGET_SLOT: {
-      RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
-      rinfo.Visit(isolate, v);
-      break;
-    }
-    case CELL_TARGET_SLOT: {
-      RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
-      rinfo.Visit(isolate, v);
-      break;
-    }
-    case CODE_ENTRY_SLOT: {
-      v->VisitCodeEntry(addr);
-      break;
-    }
-    case RELOCATED_CODE_OBJECT: {
-      HeapObject* obj = HeapObject::FromAddress(addr);
-      Code::BodyDescriptor::IterateBody(obj, v);
-      break;
-    }
-    case DEBUG_TARGET_SLOT: {
-      RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0,
-                      NULL);
-      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
-      break;
-    }
-    case EMBEDDED_OBJECT_SLOT: {
-      RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
-      rinfo.Visit(isolate, v);
-      break;
-    }
-    case OBJECT_SLOT: {
-      v->VisitPointer(reinterpret_cast<Object**>(addr));
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
-  }
-}
+static inline SlotCallbackResult UpdateSlot(Object** slot) {
+  Object* obj = reinterpret_cast<Object*>(
+      base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
 
-
-// Visitor for updating pointers from live objects in old spaces to new space.
-// It does not expect to encounter pointers to dead objects.
-class PointersUpdatingVisitor : public ObjectVisitor {
- public:
-  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
-
-  void VisitPointer(Object** p) override { UpdatePointer(p); }
-
-  void VisitPointers(Object** start, Object** end) override {
-    for (Object** p = start; p < end; p++) UpdatePointer(p);
-  }
-
-  void VisitCell(RelocInfo* rinfo) override {
-    DCHECK(rinfo->rmode() == RelocInfo::CELL);
-    Object* cell = rinfo->target_cell();
-    Object* old_cell = cell;
-    VisitPointer(&cell);
-    if (cell != old_cell) {
-      rinfo->set_target_cell(reinterpret_cast<Cell*>(cell));
-    }
-  }
-
-  void VisitEmbeddedPointer(RelocInfo* rinfo) override {
-    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
-    Object* target = rinfo->target_object();
-    Object* old_target = target;
-    VisitPointer(&target);
-    // Avoid unnecessary changes that might unnecessary flush the instruction
-    // cache.
-    if (target != old_target) {
-      rinfo->set_target_object(target);
-    }
-  }
-
-  void VisitCodeTarget(RelocInfo* rinfo) override {
-    DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
-    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
-    Object* old_target = target;
-    VisitPointer(&target);
-    if (target != old_target) {
-      rinfo->set_target_address(Code::cast(target)->instruction_start());
-    }
-  }
-
-  void VisitCodeAgeSequence(RelocInfo* rinfo) override {
-    DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
-    Object* stub = rinfo->code_age_stub();
-    DCHECK(stub != NULL);
-    VisitPointer(&stub);
-    if (stub != rinfo->code_age_stub()) {
-      rinfo->set_code_age_stub(Code::cast(stub));
-    }
-  }
-
-  void VisitDebugTarget(RelocInfo* rinfo) override {
-    DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
-           rinfo->IsPatchedDebugBreakSlotSequence());
-    Object* target =
-        Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
-    VisitPointer(&target);
-    rinfo->set_debug_call_address(Code::cast(target)->instruction_start());
-  }
-
-  static inline void UpdateSlot(Heap* heap, Object** slot) {
-    Object* obj = reinterpret_cast<Object*>(
-        base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
-
-    if (!obj->IsHeapObject()) return;
-
+  if (obj->IsHeapObject()) {
     HeapObject* heap_obj = HeapObject::cast(obj);
-
     MapWord map_word = heap_obj->map_word();
     if (map_word.IsForwardingAddress()) {
-      DCHECK(heap->InFromSpace(heap_obj) ||
+      DCHECK(heap_obj->GetHeap()->InFromSpace(heap_obj) ||
              MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
              Page::FromAddress(heap_obj->address())
                  ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
@@ -2880,15 +2857,42 @@
           reinterpret_cast<base::AtomicWord*>(slot),
           reinterpret_cast<base::AtomicWord>(obj),
           reinterpret_cast<base::AtomicWord>(target));
-      DCHECK(!heap->InFromSpace(target) &&
+      DCHECK(!heap_obj->GetHeap()->InFromSpace(target) &&
              !MarkCompactCollector::IsOnEvacuationCandidate(target));
     }
   }
+  return REMOVE_SLOT;
+}
 
- private:
-  inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
+// Visitor for updating pointers from live objects in old spaces to new space.
+// It does not expect to encounter pointers to dead objects.
+class PointersUpdatingVisitor : public ObjectVisitor {
+ public:
+  void VisitPointer(Object** p) override { UpdateSlot(p); }
 
-  Heap* heap_;
+  void VisitPointers(Object** start, Object** end) override {
+    for (Object** p = start; p < end; p++) UpdateSlot(p);
+  }
+
+  void VisitCell(RelocInfo* rinfo) override {
+    UpdateTypedSlotHelper::UpdateCell(rinfo, UpdateSlot);
+  }
+
+  void VisitEmbeddedPointer(RelocInfo* rinfo) override {
+    UpdateTypedSlotHelper::UpdateEmbeddedPointer(rinfo, UpdateSlot);
+  }
+
+  void VisitCodeTarget(RelocInfo* rinfo) override {
+    UpdateTypedSlotHelper::UpdateCodeTarget(rinfo, UpdateSlot);
+  }
+
+  void VisitCodeEntry(Address entry_address) override {
+    UpdateTypedSlotHelper::UpdateCodeEntry(entry_address, UpdateSlot);
+  }
+
+  void VisitDebugTarget(RelocInfo* rinfo) override {
+    UpdateTypedSlotHelper::UpdateDebugTarget(rinfo, UpdateSlot);
+  }
 };
 
 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
@@ -3033,21 +3037,34 @@
 
 void MarkCompactCollector::EvacuateNewSpacePrologue() {
   NewSpace* new_space = heap()->new_space();
-  NewSpacePageIterator it(new_space->bottom(), new_space->top());
   // Append the list of new space pages to be processed.
-  while (it.has_next()) {
-    newspace_evacuation_candidates_.Add(it.next());
+  for (Page* p : NewSpacePageRange(new_space->bottom(), new_space->top())) {
+    newspace_evacuation_candidates_.Add(p);
   }
   new_space->Flip();
   new_space->ResetAllocationInfo();
 }
 
-void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
-  newspace_evacuation_candidates_.Rewind(0);
-}
-
 class MarkCompactCollector::Evacuator : public Malloced {
  public:
+  enum EvacuationMode {
+    kObjectsNewToOld,
+    kPageNewToOld,
+    kObjectsOldToOld,
+    kPageNewToNew,
+  };
+
+  static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
+    // Note: The order of checks is important in this function.
+    if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
+      return kPageNewToOld;
+    if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
+      return kPageNewToNew;
+    if (chunk->InNewSpace()) return kObjectsNewToOld;
+    DCHECK(chunk->IsEvacuationCandidate());
+    return kObjectsOldToOld;
+  }
+
   // NewSpacePages with more live bytes than this threshold qualify for fast
   // evacuation.
   static int PageEvacuationThreshold() {
@@ -3059,11 +3076,11 @@
   explicit Evacuator(MarkCompactCollector* collector)
       : collector_(collector),
         compaction_spaces_(collector->heap()),
-        local_pretenuring_feedback_(HashMap::PointersMatch,
+        local_pretenuring_feedback_(base::HashMap::PointersMatch,
                                     kInitialLocalPretenuringFeedbackCapacity),
         new_space_visitor_(collector->heap(), &compaction_spaces_,
                            &local_pretenuring_feedback_),
-        new_space_page_visitor(),
+        new_space_page_visitor(collector->heap()),
         old_space_visitor_(collector->heap(), &compaction_spaces_),
         duration_(0.0),
         bytes_compacted_(0) {}
@@ -3077,38 +3094,20 @@
   CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
 
  private:
-  enum EvacuationMode {
-    kObjectsNewToOld,
-    kPageNewToOld,
-    kObjectsOldToOld,
-  };
-
   static const int kInitialLocalPretenuringFeedbackCapacity = 256;
 
   inline Heap* heap() { return collector_->heap(); }
 
-  inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
-    // Note: The order of checks is important in this function.
-    if (chunk->InNewSpace()) return kObjectsNewToOld;
-    if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
-      return kPageNewToOld;
-    DCHECK(chunk->IsEvacuationCandidate());
-    return kObjectsOldToOld;
-  }
-
   void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
     duration_ += duration;
     bytes_compacted_ += bytes_compacted;
   }
 
-  template <IterationMode mode, class Visitor>
-  inline bool EvacuateSinglePage(Page* p, Visitor* visitor);
-
   MarkCompactCollector* collector_;
 
   // Locally cached collector data.
   CompactionSpaceCollection compaction_spaces_;
-  HashMap local_pretenuring_feedback_;
+  base::HashMap local_pretenuring_feedback_;
 
   // Visitors for the corresponding spaces.
   EvacuateNewSpaceVisitor new_space_visitor_;
@@ -3120,75 +3119,78 @@
   intptr_t bytes_compacted_;
 };
 
-template <MarkCompactCollector::IterationMode mode, class Visitor>
-bool MarkCompactCollector::Evacuator::EvacuateSinglePage(Page* p,
-                                                         Visitor* visitor) {
+bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
   bool success = false;
-  DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() ||
-         p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
-  int saved_live_bytes = p->LiveBytes();
-  double evacuation_time;
+  DCHECK(page->SweepingDone());
+  int saved_live_bytes = page->LiveBytes();
+  double evacuation_time = 0.0;
+  Heap* heap = page->heap();
   {
-    AlwaysAllocateScope always_allocate(heap()->isolate());
+    AlwaysAllocateScope always_allocate(heap->isolate());
     TimedScope timed_scope(&evacuation_time);
-    success = collector_->VisitLiveObjects<Visitor>(p, visitor, mode);
+    switch (ComputeEvacuationMode(page)) {
+      case kObjectsNewToOld:
+        success = collector_->VisitLiveObjects(page, &new_space_visitor_,
+                                               kClearMarkbits);
+        ArrayBufferTracker::ProcessBuffers(
+            page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+        DCHECK(success);
+        break;
+      case kPageNewToOld:
+        success = collector_->VisitLiveObjects(page, &new_space_page_visitor,
+                                               kKeepMarking);
+        // ArrayBufferTracker will be updated during sweeping.
+        DCHECK(success);
+        break;
+      case kPageNewToNew:
+        new_space_page_visitor.account_semispace_copied(page->LiveBytes());
+        // ArrayBufferTracker will be updated during sweeping.
+        success = true;
+        break;
+      case kObjectsOldToOld:
+        success = collector_->VisitLiveObjects(page, &old_space_visitor_,
+                                               kClearMarkbits);
+        if (!success) {
+          // Aborted compaction page. We have to record slots here, since we
+          // might not have recorded them in the first place.
+          // Note: We mark the page as aborted here to be able to record slots
+          // for code objects in |RecordMigratedSlotVisitor|.
+          page->SetFlag(Page::COMPACTION_WAS_ABORTED);
+          EvacuateRecordOnlyVisitor record_visitor(collector_->heap());
+          success =
+              collector_->VisitLiveObjects(page, &record_visitor, kKeepMarking);
+          ArrayBufferTracker::ProcessBuffers(
+              page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
+          DCHECK(success);
+          // We need to return failure here to indicate that we want this page
+          // added to the sweeper.
+          success = false;
+        } else {
+          ArrayBufferTracker::ProcessBuffers(
+              page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+        }
+        break;
+      default:
+        UNREACHABLE();
+    }
   }
+  ReportCompactionProgress(evacuation_time, saved_live_bytes);
   if (FLAG_trace_evacuation) {
-    const char age_mark_tag =
-        !p->InNewSpace()
-            ? 'x'
-            : !p->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)
-                  ? '>'
-                  : !p->ContainsLimit(heap()->new_space()->age_mark()) ? '<'
-                                                                       : '#';
-    PrintIsolate(heap()->isolate(),
-                 "evacuation[%p]: page=%p new_space=%d age_mark_tag=%c "
-                 "page_evacuation=%d executable=%d live_bytes=%d time=%f\n",
-                 this, p, p->InNewSpace(), age_mark_tag,
-                 p->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION),
-                 p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
-                 evacuation_time);
-  }
-  if (success) {
-    ReportCompactionProgress(evacuation_time, saved_live_bytes);
+    PrintIsolate(heap->isolate(),
+                 "evacuation[%p]: page=%p new_space=%d "
+                 "page_evacuation=%d executable=%d contains_age_mark=%d "
+                 "live_bytes=%d time=%f\n",
+                 static_cast<void*>(this), static_cast<void*>(page),
+                 page->InNewSpace(),
+                 page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
+                     page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
+                 page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
+                 page->Contains(heap->new_space()->age_mark()),
+                 saved_live_bytes, evacuation_time);
   }
   return success;
 }
 
-bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
-  bool result = false;
-  DCHECK(page->SweepingDone());
-  switch (ComputeEvacuationMode(page)) {
-    case kObjectsNewToOld:
-      result = EvacuateSinglePage<kClearMarkbits>(page, &new_space_visitor_);
-      DCHECK(result);
-      USE(result);
-      break;
-    case kPageNewToOld:
-      result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor);
-      DCHECK(result);
-      USE(result);
-      break;
-    case kObjectsOldToOld:
-      result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_);
-      if (!result) {
-        // Aborted compaction page. We can record slots here to have them
-        // processed in parallel later on.
-        EvacuateRecordOnlyVisitor record_visitor(page->owner()->identity());
-        result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor);
-        DCHECK(result);
-        USE(result);
-        // We need to return failure here to indicate that we want this page
-        // added to the sweeper.
-        return false;
-      }
-      break;
-    default:
-      UNREACHABLE();
-  }
-  return result;
-}
-
 void MarkCompactCollector::Evacuator::Finalize() {
   heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
   heap()->code_space()->MergeCompactionSpace(
@@ -3197,11 +3199,13 @@
   heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
                                        new_space_page_visitor.promoted_size());
   heap()->IncrementSemiSpaceCopiedObjectSize(
-      new_space_visitor_.semispace_copied_size());
+      new_space_visitor_.semispace_copied_size() +
+      new_space_page_visitor.semispace_copied_size());
   heap()->IncrementYoungSurvivorsCounter(
       new_space_visitor_.promoted_size() +
       new_space_visitor_.semispace_copied_size() +
-      new_space_page_visitor.promoted_size());
+      new_space_page_visitor.promoted_size() +
+      new_space_page_visitor.semispace_copied_size());
   heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
 }
 
@@ -3249,31 +3253,33 @@
 
   static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
                                        bool success, PerPageData data) {
-    if (chunk->InNewSpace()) {
-      DCHECK(success);
-    } else if (chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
-      DCHECK(success);
-      Page* p = static_cast<Page*>(chunk);
-      p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
-      p->ForAllFreeListCategories(
-          [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
-      heap->mark_compact_collector()->sweeper().AddLatePage(
-          p->owner()->identity(), p);
-    } else {
-      Page* p = static_cast<Page*>(chunk);
-      if (success) {
-        DCHECK(p->IsEvacuationCandidate());
-        DCHECK(p->SweepingDone());
-        p->Unlink();
-      } else {
-        // We have partially compacted the page, i.e., some objects may have
-        // moved, others are still in place.
-        p->SetFlag(Page::COMPACTION_WAS_ABORTED);
-        p->ClearEvacuationCandidate();
-        // Slots have already been recorded so we just need to add it to the
-        // sweeper.
-        *data += 1;
-      }
+    using Evacuator = MarkCompactCollector::Evacuator;
+    Page* p = static_cast<Page*>(chunk);
+    switch (Evacuator::ComputeEvacuationMode(p)) {
+      case Evacuator::kPageNewToOld:
+        break;
+      case Evacuator::kPageNewToNew:
+        DCHECK(success);
+        break;
+      case Evacuator::kObjectsNewToOld:
+        DCHECK(success);
+        break;
+      case Evacuator::kObjectsOldToOld:
+        if (success) {
+          DCHECK(p->IsEvacuationCandidate());
+          DCHECK(p->SweepingDone());
+          p->Unlink();
+        } else {
+          // We have partially compacted the page, i.e., some objects may have
+          // moved, others are still in place.
+          p->ClearEvacuationCandidate();
+          // Slots have already been recorded so we just need to add it to the
+          // sweeper, which will happen after updating pointers.
+          *data += 1;
+        }
+        break;
+      default:
+        UNREACHABLE();
     }
   }
 };
@@ -3295,10 +3301,14 @@
     live_bytes += page->LiveBytes();
     if (!page->NeverEvacuate() &&
         (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
-        page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
         !page->Contains(age_mark)) {
-      EvacuateNewSpacePageVisitor::TryMoveToOldSpace(page, heap()->old_space());
+      if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
+        EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
+      } else {
+        EvacuateNewSpacePageVisitor::MoveToToSpace(page);
+      }
     }
+
     job.AddPage(page, &abandoned_pages);
   }
   DCHECK_GE(job.NumberOfPages(), 1);
@@ -3352,16 +3362,21 @@
 template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode,
           MarkCompactCollector::Sweeper::SweepingParallelism parallelism,
           MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode,
+          MarkCompactCollector::Sweeper::FreeListRebuildingMode free_list_mode,
           MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode>
 int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p,
                                             ObjectVisitor* v) {
   DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
   DCHECK(!p->IsFlagSet(Page::BLACK_PAGE));
-  DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
-            space->identity() == CODE_SPACE);
+  DCHECK((space == nullptr) || (space->identity() != CODE_SPACE) ||
+         (skip_list_mode == REBUILD_SKIP_LIST));
   DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
   DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY);
 
+  // Before we sweep objects on the page, we free dead array buffers which
+  // requires valid mark bits.
+  ArrayBufferTracker::FreeDead(p);
+
   Address free_start = p->area_start();
   DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
 
@@ -3387,8 +3402,13 @@
       if (free_space_mode == ZAP_FREE_SPACE) {
         memset(free_start, 0xcc, size);
       }
-      freed_bytes = space->UnaccountedFree(free_start, size);
-      max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+      if (free_list_mode == REBUILD_FREE_LIST) {
+        freed_bytes = space->UnaccountedFree(free_start, size);
+        max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+      } else {
+        p->heap()->CreateFillerObjectAt(free_start, size,
+                                        ClearRecordedSlots::kNo);
+      }
     }
     Map* map = object->synchronized_map();
     int size = object->SizeFromMap(map);
@@ -3415,10 +3435,16 @@
     if (free_space_mode == ZAP_FREE_SPACE) {
       memset(free_start, 0xcc, size);
     }
-    freed_bytes = space->UnaccountedFree(free_start, size);
-    max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+    if (free_list_mode == REBUILD_FREE_LIST) {
+      freed_bytes = space->UnaccountedFree(free_start, size);
+      max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+    } else {
+      p->heap()->CreateFillerObjectAt(free_start, size,
+                                      ClearRecordedSlots::kNo);
+    }
   }
   p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
+  if (free_list_mode == IGNORE_FREE_LIST) return 0;
   return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
 }
 
@@ -3438,6 +3464,7 @@
     Address start = code->instruction_start();
     Address end = code->address() + code->Size();
     RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(page, start, end);
+    RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, start, end);
   }
 }
 
@@ -3533,12 +3560,15 @@
 
     EvacuateNewSpacePrologue();
     EvacuatePagesInParallel();
-    EvacuateNewSpaceEpilogue();
     heap()->new_space()->set_age_mark(heap()->new_space()->top());
   }
 
   UpdatePointersAfterEvacuation();
 
+  if (!heap()->new_space()->Rebalance()) {
+    FatalProcessOutOfMemory("NewSpace::Rebalance");
+  }
+
   // Give pages that are queued to be freed back to the OS. Note that filtering
   // slots only handles old space (for unboxed doubles), and thus map space can
   // still contain stale pointers. We only free the chunks after pointer updates
@@ -3548,6 +3578,19 @@
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
 
+    for (Page* p : newspace_evacuation_candidates_) {
+      if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
+        p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
+        sweeper().AddLatePage(p->owner()->identity(), p);
+      } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
+        p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
+        p->ForAllFreeListCategories(
+            [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
+        sweeper().AddLatePage(p->owner()->identity(), p);
+      }
+    }
+    newspace_evacuation_candidates_.Rewind(0);
+
     for (Page* p : evacuation_candidates_) {
       // Important: skip list should be cleared only after roots were updated
       // because root iteration traverses the stack and might have to find
@@ -3560,11 +3603,6 @@
       }
     }
 
-    // EvacuateNewSpaceAndCandidates iterates over new space objects and for
-    // ArrayBuffers either re-registers them as live or promotes them. This is
-    // needed to properly free them.
-    heap()->array_buffer_tracker()->FreeDead(false);
-
     // Deallocate evacuated candidate pages.
     ReleaseEvacuationCandidates();
   }
@@ -3580,12 +3618,12 @@
 class PointerUpdateJobTraits {
  public:
   typedef int PerPageData;  // Per page data is not used in this job.
-  typedef PointersUpdatingVisitor* PerTaskData;
+  typedef int PerTaskData;  // Per task data is not used in this job.
 
-  static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
-                                    MemoryChunk* chunk, PerPageData) {
+  static bool ProcessPageInParallel(Heap* heap, PerTaskData, MemoryChunk* chunk,
+                                    PerPageData) {
     UpdateUntypedPointers(heap, chunk);
-    UpdateTypedPointers(heap, chunk, visitor);
+    UpdateTypedPointers(heap, chunk);
     return true;
   }
   static const bool NeedSequentialFinalization = false;
@@ -3595,37 +3633,71 @@
  private:
   static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
     if (direction == OLD_TO_NEW) {
-      RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap, chunk,
-                                                    UpdateOldToNewSlot);
+      RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap, chunk](Address slot) {
+        return CheckAndUpdateOldToNewSlot(heap, slot);
+      });
     } else {
-      RememberedSet<OLD_TO_OLD>::Iterate(chunk, [heap](Address slot) {
-        PointersUpdatingVisitor::UpdateSlot(heap,
-                                            reinterpret_cast<Object**>(slot));
-        return REMOVE_SLOT;
+      RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) {
+        return UpdateSlot(reinterpret_cast<Object**>(slot));
       });
     }
   }
 
-  static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk,
-                                  PointersUpdatingVisitor* visitor) {
+  static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) {
     if (direction == OLD_TO_OLD) {
       Isolate* isolate = heap->isolate();
       RememberedSet<OLD_TO_OLD>::IterateTyped(
-          chunk, [isolate, visitor](SlotType type, Address slot) {
-            UpdateTypedSlot(isolate, visitor, type, slot);
-            return REMOVE_SLOT;
+          chunk, [isolate](SlotType type, Address host_addr, Address slot) {
+            return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, type, slot,
+                                                          UpdateSlot);
+          });
+    } else {
+      Isolate* isolate = heap->isolate();
+      RememberedSet<OLD_TO_NEW>::IterateTyped(
+          chunk,
+          [isolate, heap](SlotType type, Address host_addr, Address slot) {
+            return UpdateTypedSlotHelper::UpdateTypedSlot(
+                isolate, type, slot, [heap](Object** slot) {
+                  return CheckAndUpdateOldToNewSlot(
+                      heap, reinterpret_cast<Address>(slot));
+                });
           });
     }
   }
 
-  static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) {
-    MapWord map_word = object->map_word();
-    // There could still be stale pointers in large object space, map space,
-    // and old space for pages that have been promoted.
-    if (map_word.IsForwardingAddress()) {
-      // Update the corresponding slot.
-      *address = map_word.ToForwardingAddress();
+  static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap,
+                                                       Address slot_address) {
+    Object** slot = reinterpret_cast<Object**>(slot_address);
+    if (heap->InFromSpace(*slot)) {
+      HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
+      DCHECK(heap_object->IsHeapObject());
+      MapWord map_word = heap_object->map_word();
+      // There could still be stale pointers in large object space, map space,
+      // and old space for pages that have been promoted.
+      if (map_word.IsForwardingAddress()) {
+        // Update the corresponding slot.
+        *slot = map_word.ToForwardingAddress();
+      }
+      // If the object was in from space before and is after executing the
+      // callback in to space, the object is still live.
+      // Unfortunately, we do not know about the slot. It could be in a
+      // just freed free space object.
+      if (heap->InToSpace(*slot)) {
+        return KEEP_SLOT;
+      }
+    } else if (heap->InToSpace(*slot)) {
+      DCHECK(Page::FromAddress(reinterpret_cast<HeapObject*>(*slot)->address())
+                 ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
+      // Slots can be in "to" space after a page has been moved. Since there is
+      // no forwarding information present, we need to check the markbits to
+      // determine liveness.
+      if (Marking::IsBlack(
+              Marking::MarkBitFrom(reinterpret_cast<HeapObject*>(*slot))))
+        return KEEP_SLOT;
+    } else {
+      DCHECK(!heap->InNewSpace(*slot));
     }
+    return REMOVE_SLOT;
   }
 };
 
@@ -3642,10 +3714,9 @@
       heap, heap->isolate()->cancelable_task_manager(), semaphore);
   RememberedSet<direction>::IterateMemoryChunks(
       heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
-  PointersUpdatingVisitor visitor(heap);
   int num_pages = job.NumberOfPages();
   int num_tasks = NumberOfPointerUpdateTasks(num_pages);
-  job.Run(num_tasks, [&visitor](int i) { return &visitor; });
+  job.Run(num_tasks, [](int i) { return 0; });
 }
 
 class ToSpacePointerUpdateJobTraits {
@@ -3655,6 +3726,24 @@
 
   static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
                                     MemoryChunk* chunk, PerPageData limits) {
+    if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
+      // New->new promoted pages contain garbage so they require iteration
+      // using markbits.
+      ProcessPageInParallelVisitLive(heap, visitor, chunk, limits);
+    } else {
+      ProcessPageInParallelVisitAll(heap, visitor, chunk, limits);
+    }
+    return true;
+  }
+
+  static const bool NeedSequentialFinalization = false;
+  static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
+  }
+
+ private:
+  static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor,
+                                            MemoryChunk* chunk,
+                                            PerPageData limits) {
     for (Address cur = limits.first; cur < limits.second;) {
       HeapObject* object = HeapObject::FromAddress(cur);
       Map* map = object->map();
@@ -3662,10 +3751,18 @@
       object->IterateBody(map->instance_type(), size, visitor);
       cur += size;
     }
-    return true;
   }
-  static const bool NeedSequentialFinalization = false;
-  static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
+
+  static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor,
+                                             MemoryChunk* chunk,
+                                             PerPageData limits) {
+    LiveObjectIterator<kBlackObjects> it(chunk);
+    HeapObject* object = NULL;
+    while ((object = it.Next()) != NULL) {
+      Map* map = object->map();
+      int size = object->SizeFromMap(map);
+      object->IterateBody(map->instance_type(), size, visitor);
+    }
   }
 };
 
@@ -3674,15 +3771,13 @@
       heap, heap->isolate()->cancelable_task_manager(), semaphore);
   Address space_start = heap->new_space()->bottom();
   Address space_end = heap->new_space()->top();
-  NewSpacePageIterator it(space_start, space_end);
-  while (it.has_next()) {
-    Page* page = it.next();
+  for (Page* page : NewSpacePageRange(space_start, space_end)) {
     Address start =
         page->Contains(space_start) ? space_start : page->area_start();
     Address end = page->Contains(space_end) ? space_end : page->area_end();
     job.AddPage(page, std::make_pair(start, end));
   }
-  PointersUpdatingVisitor visitor(heap);
+  PointersUpdatingVisitor visitor;
   int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1;
   job.Run(num_tasks, [&visitor](int i) { return &visitor; });
 }
@@ -3690,7 +3785,7 @@
 void MarkCompactCollector::UpdatePointersAfterEvacuation() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
 
-  PointersUpdatingVisitor updating_visitor(heap());
+  PointersUpdatingVisitor updating_visitor;
 
   {
     TRACE_GC(heap()->tracer(),
@@ -3741,7 +3836,7 @@
   int pages_freed = 0;
   Page* page = nullptr;
   while ((page = GetSweepingPageSafe(identity)) != nullptr) {
-    int freed = ParallelSweepPage(page, heap_->paged_space(identity));
+    int freed = ParallelSweepPage(page, identity);
     pages_freed += 1;
     DCHECK_GE(freed, 0);
     max_freed = Max(max_freed, freed);
@@ -3753,7 +3848,7 @@
 }
 
 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
-                                                     PagedSpace* space) {
+                                                     AllocationSpace identity) {
   int max_freed = 0;
   if (page->mutex()->TryLock()) {
     // If this page was already swept in the meantime, we can return here.
@@ -3762,19 +3857,25 @@
       return 0;
     }
     page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
-    if (space->identity() == OLD_SPACE) {
+    if (identity == NEW_SPACE) {
+      RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+               IGNORE_FREE_LIST, IGNORE_FREE_SPACE>(nullptr, page, nullptr);
+    } else if (identity == OLD_SPACE) {
       max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
-                           IGNORE_FREE_SPACE>(space, page, NULL);
-    } else if (space->identity() == CODE_SPACE) {
+                           REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
+          heap_->paged_space(identity), page, nullptr);
+    } else if (identity == CODE_SPACE) {
       max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
-                           IGNORE_FREE_SPACE>(space, page, NULL);
+                           REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
+          heap_->paged_space(identity), page, nullptr);
     } else {
       max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
-                           IGNORE_FREE_SPACE>(space, page, NULL);
+                           REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
+          heap_->paged_space(identity), page, nullptr);
     }
     {
       base::LockGuard<base::Mutex> guard(&mutex_);
-      swept_list_[space->identity()].Add(page);
+      swept_list_[identity].Add(page);
     }
     page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
     page->mutex()->Unlock();
@@ -3800,7 +3901,8 @@
                                                          Page* page) {
   page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
   int to_sweep = page->area_size() - page->LiveBytes();
-  heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
+  if (space != NEW_SPACE)
+    heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
 }
 
 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
@@ -3821,15 +3923,15 @@
 }
 
 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
+  Address space_top = space->top();
   space->ClearStats();
 
-  PageIterator it(space);
-
   int will_be_swept = 0;
   bool unused_page_present = false;
 
-  while (it.has_next()) {
-    Page* p = it.next();
+  // Loop needs to support deletion if live bytes == 0 for a page.
+  for (auto it = space->begin(); it != space->end();) {
+    Page* p = *(it++);
     DCHECK(p->SweepingDone());
 
     if (p->IsEvacuationCandidate()) {
@@ -3844,7 +3946,15 @@
       Bitmap::Clear(p);
       p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
       p->ClearFlag(Page::BLACK_PAGE);
-      // TODO(hpayer): Free unused memory of last black page.
+      // Area above the high watermark is free.
+      Address free_start = p->HighWaterMark();
+      // Check if the space top was in this page, which means that the
+      // high watermark is not up-to-date.
+      if (free_start < space_top && space_top <= p->area_end()) {
+        free_start = space_top;
+      }
+      int size = static_cast<int>(p->area_end() - free_start);
+      space->Free(free_start, size);
       continue;
     }
 
@@ -3855,8 +3965,8 @@
       // testing this is fine.
       p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
       Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD,
-                        Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>(
-          space, p, nullptr);
+                        Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_LIST,
+                        Sweeper::IGNORE_FREE_SPACE>(space, p, nullptr);
       continue;
     }
 
@@ -3864,8 +3974,10 @@
     if (p->LiveBytes() == 0) {
       if (unused_page_present) {
         if (FLAG_gc_verbose) {
-          PrintIsolate(isolate(), "sweeping: released page: %p", p);
+          PrintIsolate(isolate(), "sweeping: released page: %p",
+                       static_cast<void*>(p));
         }
+        ArrayBufferTracker::FreeAll(p);
         space->ReleasePage(p);
         continue;
       }
@@ -3936,7 +4048,10 @@
   Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
   if (target_page->IsEvacuationCandidate() &&
       !ShouldSkipEvacuationSlotRecording(host)) {
-    RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, CODE_ENTRY_SLOT, slot);
+    // TODO(ulan): remove this check after investigating crbug.com/414964.
+    CHECK(target->IsCode());
+    RememberedSet<OLD_TO_OLD>::InsertTyped(
+        source_page, reinterpret_cast<Address>(host), CODE_ENTRY_SLOT, slot);
   }
 }
 
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
index d6adb03..07b289e 100644
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -408,6 +408,7 @@
 
     enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
     enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
+    enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
     enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
     enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
 
@@ -416,6 +417,7 @@
 
     template <SweepingMode sweeping_mode, SweepingParallelism parallelism,
               SkipListRebuildingMode skip_list_mode,
+              FreeListRebuildingMode free_list_mode,
               FreeSpaceTreatmentMode free_space_mode>
     static int RawSweep(PagedSpace* space, Page* p, ObjectVisitor* v);
 
@@ -434,11 +436,12 @@
 
     int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
                            int max_pages = 0);
-    int ParallelSweepPage(Page* page, PagedSpace* space);
+    int ParallelSweepPage(Page* page, AllocationSpace identity);
 
     void StartSweeping();
     void StartSweepingHelper(AllocationSpace space_to_start);
     void EnsureCompleted();
+    void EnsureNewSpaceCompleted();
     bool IsSweepingCompleted();
     void SweepOrWaitUntilSweepingCompleted(Page* page);
 
@@ -467,7 +470,7 @@
     SweepingList sweeping_list_[kAllocationSpaces];
     bool sweeping_in_progress_;
     bool late_pages_;
-    int num_sweeping_tasks_;
+    base::AtomicNumber<intptr_t> num_sweeping_tasks_;
   };
 
   enum IterationMode {
@@ -613,9 +616,7 @@
 
   Sweeper& sweeper() { return sweeper_; }
 
-  std::vector<std::pair<void*, void*>>& wrappers_to_trace() {
-    return wrappers_to_trace_;
-  }
+  void RegisterWrappersWithEmbedderHeapTracer();
 
   void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
 
@@ -793,7 +794,6 @@
   void SweepSpaces();
 
   void EvacuateNewSpacePrologue();
-  void EvacuateNewSpaceEpilogue();
 
   void EvacuatePagesInParallel();
 
diff --git a/src/heap/object-stats.cc b/src/heap/object-stats.cc
index 0198c6b..e7d90b3 100644
--- a/src/heap/object-stats.cc
+++ b/src/heap/object-stats.cc
@@ -134,8 +134,7 @@
 
 Isolate* ObjectStats::isolate() { return heap()->isolate(); }
 
-
-void ObjectStatsVisitor::CountFixedArray(
+void ObjectStatsCollector::CountFixedArray(
     FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
     FixedArraySubInstanceType dictionary_type) {
   Heap* heap = fixed_array->map()->GetHeap();
@@ -152,12 +151,32 @@
   }
 }
 
+void ObjectStatsCollector::CollectStatistics(StaticVisitorBase::VisitorId id,
+                                             Map* map, HeapObject* obj) {
+  // Record any type specific statistics here.
+  switch (id) {
+    case StaticVisitorBase::kVisitMap:
+      RecordMapStats(map, obj);
+      break;
+    case StaticVisitorBase::kVisitCode:
+      RecordCodeStats(map, obj);
+      break;
+    case StaticVisitorBase::kVisitSharedFunctionInfo:
+      RecordSharedFunctionInfoStats(map, obj);
+      break;
+    case StaticVisitorBase::kVisitFixedArray:
+      RecordFixedArrayStats(map, obj);
+      break;
+    default:
+      break;
+  }
 
-void ObjectStatsVisitor::VisitBase(VisitorId id, Map* map, HeapObject* obj) {
   Heap* heap = map->GetHeap();
   int object_size = obj->Size();
   heap->object_stats_->RecordObjectStats(map->instance_type(), object_size);
-  table_.GetVisitorById(id)(map, obj);
+}
+
+void ObjectStatsCollector::CollectFixedArrayStatistics(HeapObject* obj) {
   if (obj->IsJSObject()) {
     JSObject* object = JSObject::cast(obj);
     CountFixedArray(object->elements(), DICTIONARY_ELEMENTS_SUB_TYPE,
@@ -167,16 +186,7 @@
   }
 }
 
-
-template <ObjectStatsVisitor::VisitorId id>
-void ObjectStatsVisitor::Visit(Map* map, HeapObject* obj) {
-  VisitBase(id, map, obj);
-}
-
-
-template <>
-void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitMap>(Map* map,
-                                                              HeapObject* obj) {
+void ObjectStatsCollector::RecordMapStats(Map* map, HeapObject* obj) {
   Heap* heap = map->GetHeap();
   Map* map_obj = Map::cast(obj);
   DCHECK(map->instance_type() == MAP_TYPE);
@@ -187,54 +197,42 @@
                                                       fixed_array_size);
   }
   if (map_obj->has_code_cache()) {
-    FixedArray* cache = FixedArray::cast(map_obj->code_cache());
+    FixedArray* cache = map_obj->code_cache();
     heap->object_stats_->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
                                                       cache->Size());
   }
-  VisitBase(kVisitMap, map, obj);
 }
 
-
-template <>
-void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitCode>(
-    Map* map, HeapObject* obj) {
+void ObjectStatsCollector::RecordCodeStats(Map* map, HeapObject* obj) {
   Heap* heap = map->GetHeap();
   int object_size = obj->Size();
   DCHECK(map->instance_type() == CODE_TYPE);
   Code* code_obj = Code::cast(obj);
   heap->object_stats_->RecordCodeSubTypeStats(code_obj->kind(),
                                               code_obj->GetAge(), object_size);
-  VisitBase(kVisitCode, map, obj);
 }
 
-
-template <>
-void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitSharedFunctionInfo>(
-    Map* map, HeapObject* obj) {
+void ObjectStatsCollector::RecordSharedFunctionInfoStats(Map* map,
+                                                         HeapObject* obj) {
   Heap* heap = map->GetHeap();
   SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
   if (sfi->scope_info() != heap->empty_fixed_array()) {
     heap->object_stats_->RecordFixedArraySubTypeStats(
         SCOPE_INFO_SUB_TYPE, FixedArray::cast(sfi->scope_info())->Size());
   }
-  VisitBase(kVisitSharedFunctionInfo, map, obj);
 }
 
-
-template <>
-void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitFixedArray>(
-    Map* map, HeapObject* obj) {
+void ObjectStatsCollector::RecordFixedArrayStats(Map* map, HeapObject* obj) {
   Heap* heap = map->GetHeap();
   FixedArray* fixed_array = FixedArray::cast(obj);
   if (fixed_array == heap->string_table()) {
     heap->object_stats_->RecordFixedArraySubTypeStats(STRING_TABLE_SUB_TYPE,
                                                       fixed_array->Size());
   }
-  VisitBase(kVisitFixedArray, map, obj);
 }
 
-
-void ObjectStatsVisitor::Initialize(VisitorDispatchTable<Callback>* original) {
+void MarkCompactObjectStatsVisitor::Initialize(
+    VisitorDispatchTable<Callback>* original) {
   // Copy the original visitor table to make call-through possible. After we
   // preserved a copy locally, we patch the original table to call us.
   table_.CopyFrom(original);
@@ -243,5 +241,29 @@
 #undef COUNT_FUNCTION
 }
 
+template <MarkCompactObjectStatsVisitor::VisitorId id>
+void MarkCompactObjectStatsVisitor::Visit(Map* map, HeapObject* obj) {
+  ObjectStatsCollector::CollectStatistics(id, map, obj);
+  table_.GetVisitorById(id)(map, obj);
+  ObjectStatsCollector::CollectFixedArrayStatistics(obj);
+}
+
+void IncrementalMarkingObjectStatsVisitor::Initialize(
+    VisitorDispatchTable<Callback>* original) {
+  // Copy the original visitor table to make call-through possible. After we
+  // preserved a copy locally, we patch the original table to call us.
+  table_.CopyFrom(original);
+#define COUNT_FUNCTION(id) original->Register(kVisit##id, Visit<kVisit##id>);
+  VISITOR_ID_LIST(COUNT_FUNCTION)
+#undef COUNT_FUNCTION
+}
+
+template <IncrementalMarkingObjectStatsVisitor::VisitorId id>
+void IncrementalMarkingObjectStatsVisitor::Visit(Map* map, HeapObject* obj) {
+  ObjectStatsCollector::CollectStatistics(id, map, obj);
+  table_.GetVisitorById(id)(map, obj);
+  ObjectStatsCollector::CollectFixedArrayStatistics(obj);
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/heap/object-stats.h b/src/heap/object-stats.h
index e2dcfaa..ce0a317 100644
--- a/src/heap/object-stats.h
+++ b/src/heap/object-stats.h
@@ -81,16 +81,34 @@
   size_t object_sizes_last_time_[OBJECT_STATS_COUNT];
 };
 
-
-class ObjectStatsVisitor : public StaticMarkingVisitor<ObjectStatsVisitor> {
+class ObjectStatsCollector {
  public:
-  static void Initialize(VisitorDispatchTable<Callback>* original);
-
-  static void VisitBase(VisitorId id, Map* map, HeapObject* obj);
+  static void CollectStatistics(StaticVisitorBase::VisitorId id, Map* map,
+                                HeapObject* obj);
+  static void CollectFixedArrayStatistics(HeapObject* obj);
 
   static void CountFixedArray(FixedArrayBase* fixed_array,
                               FixedArraySubInstanceType fast_type,
                               FixedArraySubInstanceType dictionary_type);
+  static void RecordMapStats(Map* map, HeapObject* obj);
+  static void RecordCodeStats(Map* map, HeapObject* obj);
+  static void RecordSharedFunctionInfoStats(Map* map, HeapObject* obj);
+  static void RecordFixedArrayStats(Map* map, HeapObject* obj);
+};
+
+class MarkCompactObjectStatsVisitor
+    : public StaticMarkingVisitor<MarkCompactObjectStatsVisitor> {
+ public:
+  static void Initialize(VisitorDispatchTable<Callback>* original);
+
+  template <VisitorId id>
+  static inline void Visit(Map* map, HeapObject* obj);
+};
+
+class IncrementalMarkingObjectStatsVisitor
+    : public StaticMarkingVisitor<IncrementalMarkingObjectStatsVisitor> {
+ public:
+  static void Initialize(VisitorDispatchTable<Callback>* original);
 
   template <VisitorId id>
   static inline void Visit(Map* map, HeapObject* obj);
diff --git a/src/heap/objects-visiting-inl.h b/src/heap/objects-visiting-inl.h
index 4373451..6d26ad0 100644
--- a/src/heap/objects-visiting-inl.h
+++ b/src/heap/objects-visiting-inl.h
@@ -77,7 +77,10 @@
       &FlexibleBodyVisitor<StaticVisitor, JSFunction::BodyDescriptorWeakCode,
                            int>::Visit);
 
-  table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
+  table_.Register(
+      kVisitJSArrayBuffer,
+      &FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor,
+                           int>::Visit);
 
   table_.Register(kVisitFreeSpace, &VisitFreeSpace);
 
@@ -99,21 +102,6 @@
                                           kVisitStructGeneric>();
 }
 
-
-template <typename StaticVisitor>
-int StaticNewSpaceVisitor<StaticVisitor>::VisitJSArrayBuffer(
-    Map* map, HeapObject* object) {
-  typedef FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor, int>
-      JSArrayBufferBodyVisitor;
-
-  if (!JSArrayBuffer::cast(object)->is_external()) {
-    Heap* heap = map->GetHeap();
-    heap->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(object));
-  }
-  return JSArrayBufferBodyVisitor::Visit(map, object);
-}
-
-
 template <typename StaticVisitor>
 int StaticNewSpaceVisitor<StaticVisitor>::VisitBytecodeArray(
     Map* map, HeapObject* object) {
@@ -185,7 +173,10 @@
 
   table_.Register(kVisitJSFunction, &VisitJSFunction);
 
-  table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
+  table_.Register(
+      kVisitJSArrayBuffer,
+      &FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor,
+                           void>::Visit);
 
   // Registration for kVisitJSRegExp is done by StaticVisitor.
 
@@ -385,7 +376,7 @@
   }
   // Enqueue the array in linked list of encountered transition arrays if it is
   // not already in the list.
-  if (array->next_link()->IsUndefined()) {
+  if (array->next_link()->IsUndefined(heap->isolate())) {
     Heap* heap = map->GetHeap();
     array->set_next_link(heap->encountered_transition_arrays(),
                          UPDATE_WEAK_WRITE_BARRIER);
@@ -459,9 +450,6 @@
   if (shared->ic_age() != heap->global_ic_age()) {
     shared->ResetForNewContext(heap->global_ic_age());
   }
-  if (FLAG_cleanup_code_caches_at_gc) {
-    shared->ClearTypeFeedbackInfoAtGCTime();
-  }
   if (FLAG_flush_optimized_code_cache) {
     if (!shared->OptimizedCodeMapIsCleared()) {
       // Always flush the optimized code map if requested by flag.
@@ -492,6 +480,9 @@
                                                           HeapObject* object) {
   Heap* heap = map->GetHeap();
   JSFunction* function = JSFunction::cast(object);
+  if (FLAG_cleanup_code_caches_at_gc) {
+    function->ClearTypeFeedbackInfoAtGCTime();
+  }
   MarkCompactCollector* collector = heap->mark_compact_collector();
   if (collector->is_code_flushing_enabled()) {
     if (IsFlushable(heap, function)) {
@@ -520,24 +511,6 @@
   JSObjectVisitor::Visit(map, object);
 }
 
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSArrayBuffer(
-    Map* map, HeapObject* object) {
-  Heap* heap = map->GetHeap();
-
-  typedef FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor,
-                              void> JSArrayBufferBodyVisitor;
-
-  JSArrayBufferBodyVisitor::Visit(map, object);
-
-  if (!JSArrayBuffer::cast(object)->is_external() &&
-      !heap->InNewSpace(object)) {
-    heap->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(object));
-  }
-}
-
-
 template <typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::VisitBytecodeArray(
     Map* map, HeapObject* object) {
@@ -647,9 +620,10 @@
     return false;
   }
 
-  // We do not (yet?) flush code for generator functions, because we don't know
-  // if there are still live activations (generator objects) on the heap.
-  if (shared_info->is_generator()) {
+  // We do not (yet?) flush code for generator functions, or async functions,
+  // because we don't know if there are still live activations
+  // (generator objects) on the heap.
+  if (shared_info->is_resumable()) {
     return false;
   }
 
diff --git a/src/heap/objects-visiting.cc b/src/heap/objects-visiting.cc
index dfde574..83e2e1c 100644
--- a/src/heap/objects-visiting.cc
+++ b/src/heap/objects-visiting.cc
@@ -103,6 +103,8 @@
       return kVisitJSArrayBuffer;
 
     case JS_OBJECT_TYPE:
+    case JS_ERROR_TYPE:
+    case JS_ARGUMENTS_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
     case JS_MODULE_TYPE:
@@ -212,7 +214,7 @@
         }
       }
       // Retained object is new tail.
-      DCHECK(!retained->IsUndefined());
+      DCHECK(!retained->IsUndefined(heap->isolate()));
       candidate = reinterpret_cast<T*>(retained);
       tail = candidate;
 
diff --git a/src/heap/objects-visiting.h b/src/heap/objects-visiting.h
index 4be40cd..303db0e 100644
--- a/src/heap/objects-visiting.h
+++ b/src/heap/objects-visiting.h
@@ -300,7 +300,6 @@
     return FreeSpace::cast(object)->size();
   }
 
-  INLINE(static int VisitJSArrayBuffer(Map* map, HeapObject* object));
   INLINE(static int VisitBytecodeArray(Map* map, HeapObject* object));
 
   class DataObjectVisitor {
@@ -379,7 +378,6 @@
   INLINE(static void VisitWeakCollection(Map* map, HeapObject* object));
   INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
   INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
-  INLINE(static void VisitJSArrayBuffer(Map* map, HeapObject* object));
   INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
   INLINE(static void VisitBytecodeArray(Map* map, HeapObject* object));
 
diff --git a/src/heap/remembered-set.cc b/src/heap/remembered-set.cc
index 403c99b..0bc5e6e 100644
--- a/src/heap/remembered-set.cc
+++ b/src/heap/remembered-set.cc
@@ -16,10 +16,7 @@
 template <PointerDirection direction>
 void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
   STATIC_ASSERT(direction == OLD_TO_NEW);
-  PageIterator it(heap->old_space());
-  MemoryChunk* chunk;
-  while (it.has_next()) {
-    chunk = it.next();
+  for (MemoryChunk* chunk : *heap->old_space()) {
     SlotSet* slots = GetSlotSet(chunk);
     if (slots != nullptr) {
       slots->Iterate([heap, chunk](Address addr) {
diff --git a/src/heap/remembered-set.h b/src/heap/remembered-set.h
index 45408bf..339748c 100644
--- a/src/heap/remembered-set.h
+++ b/src/heap/remembered-set.h
@@ -5,6 +5,7 @@
 #ifndef V8_REMEMBERED_SET_H
 #define V8_REMEMBERED_SET_H
 
+#include "src/assembler.h"
 #include "src/heap/heap.h"
 #include "src/heap/slot-set.h"
 #include "src/heap/spaces.h"
@@ -14,6 +15,7 @@
 
 enum PointerDirection { OLD_TO_OLD, OLD_TO_NEW };
 
+// TODO(ulan): Investigate performance of de-templatizing this class.
 template <PointerDirection direction>
 class RememberedSet {
  public:
@@ -67,9 +69,7 @@
   // The callback should take (MemoryChunk* chunk) and return void.
   template <typename Callback>
   static void IterateMemoryChunks(Heap* heap, Callback callback) {
-    MemoryChunkIterator it(heap, direction == OLD_TO_OLD
-                                     ? MemoryChunkIterator::ALL
-                                     : MemoryChunkIterator::ALL_BUT_CODE_SPACE);
+    MemoryChunkIterator it(heap);
     MemoryChunk* chunk;
     while ((chunk = it.next()) != nullptr) {
       SlotSet* slots = GetSlotSet(chunk);
@@ -98,62 +98,58 @@
     }
   }
 
-  // Iterates and filters the remembered set with the given callback.
-  // The callback should take (HeapObject** slot, HeapObject* target) and
-  // update the slot.
-  // A special wrapper takes care of filtering the slots based on their values.
-  // For OLD_TO_NEW case: slots that do not point to the ToSpace after
-  // callback invocation will be removed from the set.
-  template <typename Callback>
-  static void IterateWithWrapper(Heap* heap, Callback callback) {
-    Iterate(heap, [heap, callback](Address addr) {
-      return Wrapper(heap, addr, callback);
-    });
-  }
-
-  template <typename Callback>
-  static void IterateWithWrapper(Heap* heap, MemoryChunk* chunk,
-                                 Callback callback) {
-    Iterate(chunk, [heap, callback](Address addr) {
-      return Wrapper(heap, addr, callback);
-    });
-  }
-
   // Given a page and a typed slot in that page, this function adds the slot
   // to the remembered set.
-  static void InsertTyped(Page* page, SlotType slot_type, Address slot_addr) {
-    STATIC_ASSERT(direction == OLD_TO_OLD);
-    TypedSlotSet* slot_set = page->typed_old_to_old_slots();
+  static void InsertTyped(Page* page, Address host_addr, SlotType slot_type,
+                          Address slot_addr) {
+    TypedSlotSet* slot_set = GetTypedSlotSet(page);
     if (slot_set == nullptr) {
-      page->AllocateTypedOldToOldSlots();
-      slot_set = page->typed_old_to_old_slots();
+      AllocateTypedSlotSet(page);
+      slot_set = GetTypedSlotSet(page);
+    }
+    if (host_addr == nullptr) {
+      host_addr = page->address();
     }
     uintptr_t offset = slot_addr - page->address();
+    uintptr_t host_offset = host_addr - page->address();
     DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
-    slot_set->Insert(slot_type, static_cast<uint32_t>(offset));
+    DCHECK_LT(host_offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
+    slot_set->Insert(slot_type, static_cast<uint32_t>(host_offset),
+                     static_cast<uint32_t>(offset));
   }
 
   // Given a page and a range of typed slots in that page, this function removes
   // the slots from the remembered set.
   static void RemoveRangeTyped(Page* page, Address start, Address end) {
-    TypedSlotSet* slots = page->typed_old_to_old_slots();
+    TypedSlotSet* slots = GetTypedSlotSet(page);
     if (slots != nullptr) {
-      slots->Iterate([start, end](SlotType slot_type, Address slot_addr) {
+      slots->Iterate([start, end](SlotType slot_type, Address host_addr,
+                                  Address slot_addr) {
         return start <= slot_addr && slot_addr < end ? REMOVE_SLOT : KEEP_SLOT;
       });
     }
   }
 
+  // Iterates and filters the remembered set with the given callback.
+  // The callback should take (SlotType slot_type, Address host_addr,
+  // Address slot_addr) and return SlotCallbackResult.
+  template <typename Callback>
+  static void IterateTyped(Heap* heap, Callback callback) {
+    IterateMemoryChunks(heap, [callback](MemoryChunk* chunk) {
+      IterateTyped(chunk, callback);
+    });
+  }
+
   // Iterates and filters typed old to old pointers in the given memory chunk
   // with the given callback. The callback should take (SlotType slot_type,
   // Address slot_addr) and return SlotCallbackResult.
   template <typename Callback>
   static void IterateTyped(MemoryChunk* chunk, Callback callback) {
-    TypedSlotSet* slots = chunk->typed_old_to_old_slots();
+    TypedSlotSet* slots = GetTypedSlotSet(chunk);
     if (slots != nullptr) {
       int new_count = slots->Iterate(callback);
       if (new_count == 0) {
-        chunk->ReleaseTypedOldToOldSlots();
+        ReleaseTypedSlotSet(chunk);
       }
     }
   }
@@ -161,7 +157,7 @@
   // Clear all old to old slots from the remembered set.
   static void ClearAll(Heap* heap) {
     STATIC_ASSERT(direction == OLD_TO_OLD);
-    MemoryChunkIterator it(heap, MemoryChunkIterator::ALL);
+    MemoryChunkIterator it(heap);
     MemoryChunk* chunk;
     while ((chunk = it.next()) != nullptr) {
       chunk->ReleaseOldToOldSlots();
@@ -190,7 +186,7 @@
     if (direction == OLD_TO_OLD) {
       return chunk->typed_old_to_old_slots();
     } else {
-      return nullptr;
+      return chunk->typed_old_to_new_slots();
     }
   }
 
@@ -202,6 +198,14 @@
     }
   }
 
+  static void ReleaseTypedSlotSet(MemoryChunk* chunk) {
+    if (direction == OLD_TO_OLD) {
+      chunk->ReleaseTypedOldToOldSlots();
+    } else {
+      chunk->ReleaseTypedOldToNewSlots();
+    }
+  }
+
   static SlotSet* AllocateSlotSet(MemoryChunk* chunk) {
     if (direction == OLD_TO_OLD) {
       chunk->AllocateOldToOldSlots();
@@ -212,33 +216,135 @@
     }
   }
 
-  template <typename Callback>
-  static SlotCallbackResult Wrapper(Heap* heap, Address slot_address,
-                                    Callback slot_callback) {
-    STATIC_ASSERT(direction == OLD_TO_NEW);
-    Object** slot = reinterpret_cast<Object**>(slot_address);
-    Object* object = *slot;
-    if (heap->InFromSpace(object)) {
-      HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
-      DCHECK(heap_object->IsHeapObject());
-      slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
-      object = *slot;
-      // If the object was in from space before and is after executing the
-      // callback in to space, the object is still live.
-      // Unfortunately, we do not know about the slot. It could be in a
-      // just freed free space object.
-      if (heap->InToSpace(object)) {
-        return KEEP_SLOT;
-      }
+  static TypedSlotSet* AllocateTypedSlotSet(MemoryChunk* chunk) {
+    if (direction == OLD_TO_OLD) {
+      chunk->AllocateTypedOldToOldSlots();
+      return chunk->typed_old_to_old_slots();
     } else {
-      DCHECK(!heap->InNewSpace(object));
+      chunk->AllocateTypedOldToNewSlots();
+      return chunk->typed_old_to_new_slots();
     }
-    return REMOVE_SLOT;
   }
 
   static bool IsValidSlot(Heap* heap, MemoryChunk* chunk, Object** slot);
 };
 
+class UpdateTypedSlotHelper {
+ public:
+  // Updates a cell slot using an untyped slot callback.
+  // The callback accepts (Object** slot) and returns SlotCallbackResult.
+  template <typename Callback>
+  static SlotCallbackResult UpdateCell(RelocInfo* rinfo, Callback callback) {
+    DCHECK(rinfo->rmode() == RelocInfo::CELL);
+    Object* cell = rinfo->target_cell();
+    Object* old_cell = cell;
+    SlotCallbackResult result = callback(&cell);
+    if (cell != old_cell) {
+      rinfo->set_target_cell(reinterpret_cast<Cell*>(cell));
+    }
+    return result;
+  }
+
+  // Updates a code entry slot using an untyped slot callback.
+  // The callback accepts (Object** slot) and returns SlotCallbackResult.
+  template <typename Callback>
+  static SlotCallbackResult UpdateCodeEntry(Address entry_address,
+                                            Callback callback) {
+    Object* code = Code::GetObjectFromEntryAddress(entry_address);
+    Object* old_code = code;
+    SlotCallbackResult result = callback(&code);
+    if (code != old_code) {
+      Memory::Address_at(entry_address) =
+          reinterpret_cast<Code*>(code)->entry();
+    }
+    return result;
+  }
+
+  // Updates a code target slot using an untyped slot callback.
+  // The callback accepts (Object** slot) and returns SlotCallbackResult.
+  template <typename Callback>
+  static SlotCallbackResult UpdateCodeTarget(RelocInfo* rinfo,
+                                             Callback callback) {
+    DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    Object* old_target = target;
+    SlotCallbackResult result = callback(&target);
+    if (target != old_target) {
+      rinfo->set_target_address(Code::cast(target)->instruction_start());
+    }
+    return result;
+  }
+
+  // Updates an embedded pointer slot using an untyped slot callback.
+  // The callback accepts (Object** slot) and returns SlotCallbackResult.
+  template <typename Callback>
+  static SlotCallbackResult UpdateEmbeddedPointer(RelocInfo* rinfo,
+                                                  Callback callback) {
+    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+    Object* target = rinfo->target_object();
+    Object* old_target = target;
+    SlotCallbackResult result = callback(&target);
+    if (target != old_target) {
+      rinfo->set_target_object(target);
+    }
+    return result;
+  }
+
+  // Updates a debug target slot using an untyped slot callback.
+  // The callback accepts (Object** slot) and returns SlotCallbackResult.
+  template <typename Callback>
+  static SlotCallbackResult UpdateDebugTarget(RelocInfo* rinfo,
+                                              Callback callback) {
+    DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+           rinfo->IsPatchedDebugBreakSlotSequence());
+    Object* target =
+        Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
+    SlotCallbackResult result = callback(&target);
+    rinfo->set_debug_call_address(Code::cast(target)->instruction_start());
+    return result;
+  }
+
+  // Updates a typed slot using an untyped slot callback.
+  // The callback accepts (Object** slot) and returns SlotCallbackResult.
+  template <typename Callback>
+  static SlotCallbackResult UpdateTypedSlot(Isolate* isolate,
+                                            SlotType slot_type, Address addr,
+                                            Callback callback) {
+    switch (slot_type) {
+      case CODE_TARGET_SLOT: {
+        RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
+        return UpdateCodeTarget(&rinfo, callback);
+      }
+      case CELL_TARGET_SLOT: {
+        RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
+        return UpdateCell(&rinfo, callback);
+      }
+      case CODE_ENTRY_SLOT: {
+        return UpdateCodeEntry(addr, callback);
+      }
+      case DEBUG_TARGET_SLOT: {
+        RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION,
+                        0, NULL);
+        if (rinfo.IsPatchedDebugBreakSlotSequence()) {
+          return UpdateDebugTarget(&rinfo, callback);
+        }
+        return REMOVE_SLOT;
+      }
+      case EMBEDDED_OBJECT_SLOT: {
+        RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
+        return UpdateEmbeddedPointer(&rinfo, callback);
+      }
+      case OBJECT_SLOT: {
+        return callback(reinterpret_cast<Object**>(addr));
+      }
+      case NUMBER_OF_SLOT_TYPES:
+        break;
+    }
+    UNREACHABLE();
+    return REMOVE_SLOT;
+  }
+};
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/heap/scavenger-inl.h b/src/heap/scavenger-inl.h
index b8fd1c8..0b6a0f4 100644
--- a/src/heap/scavenger-inl.h
+++ b/src/heap/scavenger-inl.h
@@ -37,10 +37,35 @@
   return ScavengeObjectSlow(p, object);
 }
 
+SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
+                                                     Address slot_address) {
+  Object** slot = reinterpret_cast<Object**>(slot_address);
+  Object* object = *slot;
+  if (heap->InFromSpace(object)) {
+    HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+    DCHECK(heap_object->IsHeapObject());
+
+    ScavengeObject(reinterpret_cast<HeapObject**>(slot), heap_object);
+
+    object = *slot;
+    // If the object was in from space before and is after executing the
+    // callback in to space, the object is still live.
+    // Unfortunately, we do not know about the slot. It could be in a
+    // just freed free space object.
+    if (heap->InToSpace(object)) {
+      return KEEP_SLOT;
+    }
+  } else {
+    DCHECK(!heap->InNewSpace(object));
+  }
+  return REMOVE_SLOT;
+}
 
 // static
-void StaticScavengeVisitor::VisitPointer(Heap* heap, HeapObject* obj,
-                                         Object** p) {
+template <PromotionMode promotion_mode>
+void StaticScavengeVisitor<promotion_mode>::VisitPointer(Heap* heap,
+                                                         HeapObject* obj,
+                                                         Object** p) {
   Object* object = *p;
   if (!heap->InNewSpace(object)) return;
   Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
diff --git a/src/heap/scavenger.cc b/src/heap/scavenger.cc
index 456d8a4..9b8bfc2 100644
--- a/src/heap/scavenger.cc
+++ b/src/heap/scavenger.cc
@@ -10,7 +10,6 @@
 #include "src/heap/scavenger-inl.h"
 #include "src/isolate.h"
 #include "src/log.h"
-#include "src/profiler/cpu-profiler.h"
 
 namespace v8 {
 namespace internal {
@@ -23,8 +22,7 @@
 
 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
 
-
-template <MarksHandling marks_handling,
+template <MarksHandling marks_handling, PromotionMode promotion_mode,
           LoggingAndProfiling logging_and_profiling_mode>
 class ScavengingVisitor : public StaticVisitorBase {
  public:
@@ -37,7 +35,8 @@
     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
     table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
     table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
-    table_.Register(kVisitJSArrayBuffer, &EvacuateJSArrayBuffer);
+    table_.Register(kVisitJSArrayBuffer,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
 
     table_.Register(
         kVisitNativeContext,
@@ -200,7 +199,6 @@
     return false;
   }
 
-
   template <ObjectContents object_contents, AllocationAlignment alignment>
   static inline void EvacuateObject(Map* map, HeapObject** slot,
                                     HeapObject* object, int object_size) {
@@ -208,7 +206,8 @@
     SLOW_DCHECK(object->Size() == object_size);
     Heap* heap = map->GetHeap();
 
-    if (!heap->ShouldBePromoted(object->address(), object_size)) {
+    if (!heap->ShouldBePromoted<promotion_mode>(object->address(),
+                                                object_size)) {
       // A semi-space copy may fail due to fragmentation. In that case, we
       // try to promote the object.
       if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
@@ -220,14 +219,15 @@
                                                   object_size)) {
       return;
     }
-
+    if (promotion_mode == PROMOTE_MARKED) {
+      FatalProcessOutOfMemory("Scavenger: promoting marked\n");
+    }
     // If promotion failed, we try to copy the object to the other semi-space
     if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
 
     FatalProcessOutOfMemory("Scavenger: semi-space copy\n");
   }
 
-
   static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
                                         HeapObject* object) {
     ObjectEvacuationStrategy<POINTER_OBJECT>::Visit(map, slot, object);
@@ -252,7 +252,6 @@
     }
   }
 
-
   static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
                                         HeapObject* object) {
     int length = reinterpret_cast<FixedArray*>(object)->synchronized_length();
@@ -261,7 +260,6 @@
                                                  object_size);
   }
 
-
   static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
                                               HeapObject* object) {
     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
@@ -269,7 +267,6 @@
     EvacuateObject<DATA_OBJECT, kDoubleAligned>(map, slot, object, object_size);
   }
 
-
   static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
                                              HeapObject* object) {
     int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
@@ -277,7 +274,6 @@
                                                  object_size);
   }
 
-
   static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
                                                HeapObject* object) {
     int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
@@ -285,28 +281,12 @@
                                                    object_size);
   }
 
-
-  static inline void EvacuateJSArrayBuffer(Map* map, HeapObject** slot,
-                                           HeapObject* object) {
-    ObjectEvacuationStrategy<POINTER_OBJECT>::Visit(map, slot, object);
-
-    Heap* heap = map->GetHeap();
-    MapWord map_word = object->map_word();
-    DCHECK(map_word.IsForwardingAddress());
-    HeapObject* target = map_word.ToForwardingAddress();
-    if (!heap->InNewSpace(target)) {
-      heap->array_buffer_tracker()->Promote(JSArrayBuffer::cast(target));
-    }
-  }
-
-
   static inline void EvacuateByteArray(Map* map, HeapObject** slot,
                                        HeapObject* object) {
     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
     EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
   }
 
-
   static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
                                               HeapObject* object) {
     int object_size = SeqOneByteString::cast(object)
@@ -314,7 +294,6 @@
     EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
   }
 
-
   static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
                                               HeapObject* object) {
     int object_size = SeqTwoByteString::cast(object)
@@ -322,7 +301,6 @@
     EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
   }
 
-
   static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
                                                HeapObject* object) {
     DCHECK(IsShortcutCandidate(map->instance_type()));
@@ -380,21 +358,21 @@
   static VisitorDispatchTable<ScavengingCallback> table_;
 };
 
-
-template <MarksHandling marks_handling,
+template <MarksHandling marks_handling, PromotionMode promotion_mode,
           LoggingAndProfiling logging_and_profiling_mode>
-VisitorDispatchTable<ScavengingCallback>
-    ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
-
+VisitorDispatchTable<ScavengingCallback> ScavengingVisitor<
+    marks_handling, promotion_mode, logging_and_profiling_mode>::table_;
 
 // static
 void Scavenger::Initialize() {
-  ScavengingVisitor<TRANSFER_MARKS,
+  ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
-  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
-  ScavengingVisitor<TRANSFER_MARKS,
+  ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
+                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
+  ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
-  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
+  ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
+                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
 }
 
 
@@ -412,28 +390,28 @@
 void Scavenger::SelectScavengingVisitorsTable() {
   bool logging_and_profiling =
       FLAG_verify_predictable || isolate()->logger()->is_logging() ||
-      isolate()->cpu_profiler()->is_profiling() ||
+      isolate()->is_profiling() ||
       (isolate()->heap_profiler() != NULL &&
        isolate()->heap_profiler()->is_tracking_object_moves());
 
   if (!heap()->incremental_marking()->IsMarking()) {
     if (!logging_and_profiling) {
       scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<IGNORE_MARKS,
+          ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
     } else {
       scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<IGNORE_MARKS,
+          ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
     }
   } else {
     if (!logging_and_profiling) {
       scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<TRANSFER_MARKS,
+          ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
     } else {
       scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<TRANSFER_MARKS,
+          ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
     }
 
@@ -467,6 +445,8 @@
   Object* object = *p;
   if (!heap_->InNewSpace(object)) return;
 
+  if (heap_->PurgeLeftTrimmedObject(p)) return;
+
   Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                             reinterpret_cast<HeapObject*>(object));
 }
diff --git a/src/heap/scavenger.h b/src/heap/scavenger.h
index 5d0abf4..f2213b8 100644
--- a/src/heap/scavenger.h
+++ b/src/heap/scavenger.h
@@ -6,6 +6,7 @@
 #define V8_HEAP_SCAVENGER_H_
 
 #include "src/heap/objects-visiting.h"
+#include "src/heap/slot-set.h"
 
 namespace v8 {
 namespace internal {
@@ -25,6 +26,8 @@
   // ensure the precondition that the object is (a) a heap object and (b) in
   // the heap's from space.
   static inline void ScavengeObject(HeapObject** p, HeapObject* object);
+  static inline SlotCallbackResult CheckAndScavengeObject(Heap* heap,
+                                                          Address slot_address);
 
   // Slow part of {ScavengeObject} above.
   static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
@@ -60,8 +63,9 @@
 
 // Helper class for turning the scavenger into an object visitor that is also
 // filtering out non-HeapObjects and objects which do not reside in new space.
+template <PromotionMode promotion_mode>
 class StaticScavengeVisitor
-    : public StaticNewSpaceVisitor<StaticScavengeVisitor> {
+    : public StaticNewSpaceVisitor<StaticScavengeVisitor<promotion_mode>> {
  public:
   static inline void VisitPointer(Heap* heap, HeapObject* object, Object** p);
 };
diff --git a/src/heap/slot-set.h b/src/heap/slot-set.h
index e55ffe9..2fac50f 100644
--- a/src/heap/slot-set.h
+++ b/src/heap/slot-set.h
@@ -217,7 +217,6 @@
 enum SlotType {
   EMBEDDED_OBJECT_SLOT,
   OBJECT_SLOT,
-  RELOCATED_CODE_OBJECT,
   CELL_TARGET_SLOT,
   CODE_TARGET_SLOT,
   CODE_ENTRY_SLOT,
@@ -234,7 +233,30 @@
 // typed slots contain V8 internal pointers that are not directly exposed to JS.
 class TypedSlotSet {
  public:
-  typedef uint32_t TypedSlot;
+  struct TypedSlot {
+    TypedSlot() : type_and_offset_(0), host_offset_(0) {}
+
+    TypedSlot(SlotType type, uint32_t host_offset, uint32_t offset)
+        : type_and_offset_(TypeField::encode(type) |
+                           OffsetField::encode(offset)),
+          host_offset_(host_offset) {}
+
+    bool operator==(const TypedSlot other) {
+      return type_and_offset_ == other.type_and_offset_ &&
+             host_offset_ == other.host_offset_;
+    }
+
+    bool operator!=(const TypedSlot other) { return !(*this == other); }
+
+    SlotType type() { return TypeField::decode(type_and_offset_); }
+
+    uint32_t offset() { return OffsetField::decode(type_and_offset_); }
+
+    uint32_t host_offset() { return host_offset_; }
+
+    uint32_t type_and_offset_;
+    uint32_t host_offset_;
+  };
   static const int kMaxOffset = 1 << 29;
 
   explicit TypedSlotSet(Address page_start) : page_start_(page_start) {
@@ -251,8 +273,8 @@
   }
 
   // The slot offset specifies a slot at address page_start_ + offset.
-  void Insert(SlotType type, int offset) {
-    TypedSlot slot = ToTypedSlot(type, offset);
+  void Insert(SlotType type, uint32_t host_offset, uint32_t offset) {
+    TypedSlot slot(type, host_offset, offset);
     if (!chunk_->AddSlot(slot)) {
       chunk_ = new Chunk(chunk_, NextCapacity(chunk_->capacity));
       bool added = chunk_->AddSlot(slot);
@@ -273,7 +295,7 @@
   template <typename Callback>
   int Iterate(Callback callback) {
     STATIC_ASSERT(NUMBER_OF_SLOT_TYPES < 8);
-    const TypedSlot kRemovedSlot = TypeField::encode(NUMBER_OF_SLOT_TYPES);
+    const TypedSlot kRemovedSlot(NUMBER_OF_SLOT_TYPES, 0, 0);
     Chunk* chunk = chunk_;
     int new_count = 0;
     while (chunk != nullptr) {
@@ -282,9 +304,10 @@
       for (int i = 0; i < count; i++) {
         TypedSlot slot = buffer[i];
         if (slot != kRemovedSlot) {
-          SlotType type = TypeField::decode(slot);
-          Address addr = page_start_ + OffsetField::decode(slot);
-          if (callback(type, addr) == KEEP_SLOT) {
+          SlotType type = slot.type();
+          Address addr = page_start_ + slot.offset();
+          Address host_addr = page_start_ + slot.host_offset();
+          if (callback(type, host_addr, addr) == KEEP_SLOT) {
             new_count++;
           } else {
             buffer[i] = kRemovedSlot;
@@ -304,10 +327,6 @@
     return Min(kMaxBufferSize, capacity * 2);
   }
 
-  static TypedSlot ToTypedSlot(SlotType type, int offset) {
-    return TypeField::encode(type) | OffsetField::encode(offset);
-  }
-
   class OffsetField : public BitField<int, 0, 29> {};
   class TypeField : public BitField<SlotType, 29, 3> {};
 
diff --git a/src/heap/spaces-inl.h b/src/heap/spaces-inl.h
index f9e40bb..dbf3fff 100644
--- a/src/heap/spaces-inl.h
+++ b/src/heap/spaces-inl.h
@@ -15,6 +15,24 @@
 namespace v8 {
 namespace internal {
 
+template <class PAGE_TYPE>
+PageIteratorImpl<PAGE_TYPE>& PageIteratorImpl<PAGE_TYPE>::operator++() {
+  p_ = p_->next_page();
+  return *this;
+}
+
+template <class PAGE_TYPE>
+PageIteratorImpl<PAGE_TYPE> PageIteratorImpl<PAGE_TYPE>::operator++(int) {
+  PageIteratorImpl<PAGE_TYPE> tmp(*this);
+  operator++();
+  return tmp;
+}
+
+NewSpacePageRange::NewSpacePageRange(Address start, Address limit)
+    : range_(Page::FromAddress(start),
+             Page::FromAllocationAreaAddress(limit)->next_page()) {
+  SemiSpace::AssertValidRange(start, limit);
+}
 
 // -----------------------------------------------------------------------------
 // Bitmap
@@ -31,25 +49,6 @@
     bitmap->cells()[i] = 0xffffffff;
 }
 
-// -----------------------------------------------------------------------------
-// PageIterator
-
-PageIterator::PageIterator(PagedSpace* space)
-    : space_(space),
-      prev_page_(&space->anchor_),
-      next_page_(prev_page_->next_page()) {}
-
-
-bool PageIterator::has_next() { return next_page_ != &space_->anchor_; }
-
-
-Page* PageIterator::next() {
-  DCHECK(has_next());
-  prev_page_ = next_page_;
-  next_page_ = next_page_->next_page();
-  return prev_page_;
-}
-
 
 // -----------------------------------------------------------------------------
 // SemiSpaceIterator
@@ -72,56 +71,17 @@
   return nullptr;
 }
 
-
-HeapObject* SemiSpaceIterator::next_object() { return Next(); }
-
-
-// -----------------------------------------------------------------------------
-// NewSpacePageIterator
-
-NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
-    : prev_page_(Page::FromAddress(space->ToSpaceStart())->prev_page()),
-      next_page_(Page::FromAddress(space->ToSpaceStart())),
-      last_page_(Page::FromAllocationAreaAddress(space->ToSpaceEnd())) {}
-
-NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
-    : prev_page_(space->anchor()),
-      next_page_(prev_page_->next_page()),
-      last_page_(prev_page_->prev_page()) {}
-
-NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
-    : prev_page_(Page::FromAddress(start)->prev_page()),
-      next_page_(Page::FromAddress(start)),
-      last_page_(Page::FromAllocationAreaAddress(limit)) {
-  SemiSpace::AssertValidRange(start, limit);
-}
-
-
-bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }
-
-Page* NewSpacePageIterator::next() {
-  DCHECK(has_next());
-  prev_page_ = next_page_;
-  next_page_ = next_page_->next_page();
-  return prev_page_;
-}
-
-
 // -----------------------------------------------------------------------------
 // HeapObjectIterator
 
 HeapObject* HeapObjectIterator::Next() {
   do {
     HeapObject* next_obj = FromCurrentPage();
-    if (next_obj != NULL) return next_obj;
+    if (next_obj != nullptr) return next_obj;
   } while (AdvanceToNextPage());
-  return NULL;
+  return nullptr;
 }
 
-
-HeapObject* HeapObjectIterator::next_object() { return Next(); }
-
-
 HeapObject* HeapObjectIterator::FromCurrentPage() {
   while (cur_addr_ != cur_end_) {
     if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
@@ -129,15 +89,9 @@
       continue;
     }
     HeapObject* obj = HeapObject::FromAddress(cur_addr_);
-    int obj_size = obj->Size();
+    const int obj_size = obj->Size();
     cur_addr_ += obj_size;
-    DCHECK(cur_addr_ <= cur_end_);
-    // TODO(hpayer): Remove the debugging code.
-    if (cur_addr_ > cur_end_) {
-      space_->heap()->isolate()->PushStackTraceAndDie(0xaaaaaaaa, obj, NULL,
-                                                      obj_size);
-    }
-
+    DCHECK_LE(cur_addr_, cur_end_);
     if (!obj->IsFiller()) {
       if (obj->IsCode()) {
         DCHECK_EQ(space_, space_->heap()->code_space());
@@ -148,21 +102,7 @@
       return obj;
     }
   }
-  return NULL;
-}
-
-// -----------------------------------------------------------------------------
-// LargePageIterator
-
-LargePageIterator::LargePageIterator(LargeObjectSpace* space)
-    : next_page_(space->first_page()) {}
-
-LargePage* LargePageIterator::next() {
-  LargePage* result = next_page_;
-  if (next_page_ != nullptr) {
-    next_page_ = next_page_->next_page();
-  }
-  return result;
+  return nullptr;
 }
 
 // -----------------------------------------------------------------------------
@@ -209,9 +149,8 @@
 }
 
 bool SemiSpace::ContainsSlow(Address a) {
-  NewSpacePageIterator it(this);
-  while (it.has_next()) {
-    if (it.next() == MemoryChunk::FromAddress(a)) return true;
+  for (Page* p : *this) {
+    if (p == MemoryChunk::FromAddress(a)) return true;
   }
   return false;
 }
@@ -260,6 +199,7 @@
                                        : MemoryChunk::IN_TO_SPACE));
   Page* page = static_cast<Page*>(chunk);
   heap->incremental_marking()->SetNewSpacePageFlags(page);
+  page->AllocateLocalTracker();
   return page;
 }
 
@@ -270,7 +210,6 @@
 Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                        PagedSpace* owner) {
   Page* page = reinterpret_cast<Page*>(chunk);
-  page->mutex_ = new base::Mutex();
   DCHECK(page->area_size() <= kAllocatableMemory);
   DCHECK(chunk->owner() == owner);
 
@@ -311,8 +250,8 @@
 
 void MemoryChunk::ResetLiveBytes() {
   if (FLAG_trace_live_bytes) {
-    PrintIsolate(heap()->isolate(), "live-bytes: reset page=%p %d->0\n", this,
-                 live_byte_count_);
+    PrintIsolate(heap()->isolate(), "live-bytes: reset page=%p %d->0\n",
+                 static_cast<void*>(this), live_byte_count_);
   }
   live_byte_count_ = 0;
 }
@@ -320,9 +259,9 @@
 void MemoryChunk::IncrementLiveBytes(int by) {
   if (IsFlagSet(BLACK_PAGE)) return;
   if (FLAG_trace_live_bytes) {
-    PrintIsolate(heap()->isolate(),
-                 "live-bytes: update page=%p delta=%d %d->%d\n", this, by,
-                 live_byte_count_, live_byte_count_ + by);
+    PrintIsolate(
+        heap()->isolate(), "live-bytes: update page=%p delta=%d %d->%d\n",
+        static_cast<void*>(this), by, live_byte_count_, live_byte_count_ + by);
   }
   live_byte_count_ += by;
   DCHECK_GE(live_byte_count_, 0);
@@ -382,6 +321,7 @@
 }
 
 void Page::MarkNeverAllocateForTesting() {
+  DCHECK(this->owner()->identity() != NEW_SPACE);
   DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
   SetFlag(NEVER_ALLOCATE_ON_PAGE);
   reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
@@ -404,42 +344,34 @@
   InitializeFreeListCategories();
 }
 
-MemoryChunkIterator::MemoryChunkIterator(Heap* heap, Mode mode)
-    : state_(kOldSpaceState),
-      mode_(mode),
-      old_iterator_(heap->old_space()),
-      code_iterator_(heap->code_space()),
-      map_iterator_(heap->map_space()),
-      lo_iterator_(heap->lo_space()) {}
+MemoryChunkIterator::MemoryChunkIterator(Heap* heap)
+    : heap_(heap),
+      state_(kOldSpaceState),
+      old_iterator_(heap->old_space()->begin()),
+      code_iterator_(heap->code_space()->begin()),
+      map_iterator_(heap->map_space()->begin()),
+      lo_iterator_(heap->lo_space()->begin()) {}
 
 MemoryChunk* MemoryChunkIterator::next() {
   switch (state_) {
     case kOldSpaceState: {
-      if (old_iterator_.has_next()) {
-        return old_iterator_.next();
-      }
+      if (old_iterator_ != heap_->old_space()->end()) return *(old_iterator_++);
       state_ = kMapState;
       // Fall through.
     }
     case kMapState: {
-      if (mode_ != ALL_BUT_MAP_SPACE && map_iterator_.has_next()) {
-        return map_iterator_.next();
-      }
+      if (map_iterator_ != heap_->map_space()->end()) return *(map_iterator_++);
       state_ = kCodeState;
       // Fall through.
     }
     case kCodeState: {
-      if (mode_ != ALL_BUT_CODE_SPACE && code_iterator_.has_next()) {
-        return code_iterator_.next();
-      }
+      if (code_iterator_ != heap_->code_space()->end())
+        return *(code_iterator_++);
       state_ = kLargeObjectState;
       // Fall through.
     }
     case kLargeObjectState: {
-      MemoryChunk* answer = lo_iterator_.next();
-      if (answer != nullptr) {
-        return answer;
-      }
+      if (lo_iterator_ != heap_->lo_space()->end()) return *(lo_iterator_++);
       state_ = kFinishedState;
       // Fall through;
     }
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index e517c45..1dcd044 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -4,10 +4,13 @@
 
 #include "src/heap/spaces.h"
 
+#include <utility>
+
 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
 #include "src/base/platform/semaphore.h"
 #include "src/full-codegen/full-codegen.h"
+#include "src/heap/array-buffer-tracker.h"
 #include "src/heap/slot-set.h"
 #include "src/macro-assembler.h"
 #include "src/msan.h"
@@ -21,50 +24,34 @@
 // ----------------------------------------------------------------------------
 // HeapObjectIterator
 
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
-  // You can't actually iterate over the anchor page.  It is not a real page,
-  // just an anchor for the double linked page list.  Initialize as if we have
-  // reached the end of the anchor page, then the first iteration will move on
-  // to the first page.
-  Initialize(space, NULL, NULL, kAllPagesInSpace);
-}
+HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
+    : cur_addr_(nullptr),
+      cur_end_(nullptr),
+      space_(space),
+      page_range_(space->anchor()->next_page(), space->anchor()),
+      current_page_(page_range_.begin()) {}
 
-
-HeapObjectIterator::HeapObjectIterator(Page* page) {
+HeapObjectIterator::HeapObjectIterator(Page* page)
+    : cur_addr_(nullptr),
+      cur_end_(nullptr),
+      space_(reinterpret_cast<PagedSpace*>(page->owner())),
+      page_range_(page),
+      current_page_(page_range_.begin()) {
+#ifdef DEBUG
   Space* owner = page->owner();
   DCHECK(owner == page->heap()->old_space() ||
          owner == page->heap()->map_space() ||
          owner == page->heap()->code_space());
-  Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
-             page->area_end(), kOnePageOnly);
-  DCHECK(page->SweepingDone());
+#endif  // DEBUG
 }
 
-
-void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
-                                    HeapObjectIterator::PageMode mode) {
-  space_ = space;
-  cur_addr_ = cur;
-  cur_end_ = end;
-  page_mode_ = mode;
-}
-
-
 // We have hit the end of the page and should advance to the next block of
 // objects.  This happens at the end of the page.
 bool HeapObjectIterator::AdvanceToNextPage() {
-  DCHECK(cur_addr_ == cur_end_);
-  if (page_mode_ == kOnePageOnly) return false;
-  Page* cur_page;
-  if (cur_addr_ == NULL) {
-    cur_page = space_->anchor();
-  } else {
-    cur_page = Page::FromAddress(cur_addr_ - 1);
-    DCHECK(cur_addr_ == cur_page->area_end());
-  }
-  cur_page = cur_page->next_page();
-  if (cur_page == space_->anchor()) return false;
-  cur_page->heap()
+  DCHECK_EQ(cur_addr_, cur_end_);
+  if (current_page_ == page_range_.end()) return false;
+  Page* cur_page = *(current_page_++);
+  space_->heap()
       ->mark_compact_collector()
       ->sweeper()
       .SweepOrWaitUntilSweepingCompleted(cur_page);
@@ -119,15 +106,16 @@
     requested = kMinimumCodeRangeSize;
   }
 
+  const size_t reserved_area =
+      kReservedCodeRangePages * base::OS::CommitPageSize();
+  if (requested < (kMaximalCodeRangeSize - reserved_area))
+    requested += reserved_area;
+
   DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
-#ifdef V8_TARGET_ARCH_MIPS64
-  // To use pseudo-relative jumps such as j/jal instructions which have 28-bit
-  // encoded immediate, the addresses have to be in range of 256Mb aligned
-  // region.
-  code_range_ = new base::VirtualMemory(requested, kMaximalCodeRangeSize);
-#else
-  code_range_ = new base::VirtualMemory(requested);
-#endif
+
+  code_range_ = new base::VirtualMemory(
+      requested, Max(kCodeRangeAreaAlignment,
+                     static_cast<size_t>(base::OS::AllocateAlignment())));
   CHECK(code_range_ != NULL);
   if (!code_range_->IsReserved()) {
     delete code_range_;
@@ -141,18 +129,16 @@
 
   // On some platforms, specifically Win64, we need to reserve some pages at
   // the beginning of an executable space.
-  if (kReservedCodeRangePages) {
-    if (!code_range_->Commit(
-            base, kReservedCodeRangePages * base::OS::CommitPageSize(), true)) {
+  if (reserved_area > 0) {
+    if (!code_range_->Commit(base, reserved_area, true)) {
       delete code_range_;
       code_range_ = NULL;
       return false;
     }
-    base += kReservedCodeRangePages * base::OS::CommitPageSize();
+    base += reserved_area;
   }
   Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
-  size_t size = code_range_->size() - (aligned_base - base) -
-                kReservedCodeRangePages * base::OS::CommitPageSize();
+  size_t size = code_range_->size() - (aligned_base - base) - reserved_area;
   allocation_list_.Add(FreeBlock(aligned_base, size));
   current_allocation_block_index_ = 0;
 
@@ -364,6 +350,7 @@
 };
 
 void MemoryAllocator::Unmapper::FreeQueuedChunks() {
+  ReconsiderDelayedChunks();
   if (FLAG_concurrent_sweeping) {
     V8::GetCurrentPlatform()->CallOnBackgroundThread(
         new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
@@ -397,6 +384,24 @@
   }
 }
 
+void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
+  std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
+  // Move constructed, so the permanent list should be empty.
+  DCHECK(delayed_regular_chunks_.empty());
+  for (auto it = delayed_chunks.begin(); it != delayed_chunks.end(); ++it) {
+    AddMemoryChunkSafe<kRegular>(*it);
+  }
+}
+
+bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
+  MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector();
+  // We cannot free memory chunks in new space while the sweeper is running
+  // since a sweeper thread might be stuck right before trying to lock the
+  // corresponding page.
+  return !chunk->InNewSpace() || (mc == nullptr) ||
+         mc->sweeper().IsSweepingCompleted();
+}
+
 bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                    Executability executable) {
   if (!base::VirtualMemory::CommitRegion(base, size,
@@ -414,8 +419,8 @@
   // Code which is part of the code-range does not have its own VirtualMemory.
   DCHECK(code_range() == NULL ||
          !code_range()->contains(static_cast<Address>(reservation->address())));
-  DCHECK(executable == NOT_EXECUTABLE || code_range() == NULL ||
-         !code_range()->valid() || reservation->size() <= Page::kPageSize);
+  DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid() ||
+         reservation->size() <= Page::kPageSize);
 
   reservation->Release();
 }
@@ -429,8 +434,7 @@
     DCHECK(executable == EXECUTABLE);
     code_range()->FreeRawMemory(base, size);
   } else {
-    DCHECK(executable == NOT_EXECUTABLE || code_range() == NULL ||
-           !code_range()->valid());
+    DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
     bool result = base::VirtualMemory::ReleaseRegion(base, size);
     USE(result);
     DCHECK(result);
@@ -506,19 +510,21 @@
   chunk->InitializeReservedMemory();
   chunk->old_to_new_slots_ = nullptr;
   chunk->old_to_old_slots_ = nullptr;
+  chunk->typed_old_to_new_slots_ = nullptr;
   chunk->typed_old_to_old_slots_ = nullptr;
   chunk->skip_list_ = nullptr;
   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
   chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
-  chunk->mutex_ = nullptr;
+  chunk->mutex_ = new base::Mutex();
   chunk->available_in_free_list_ = 0;
   chunk->wasted_memory_ = 0;
   chunk->ResetLiveBytes();
   Bitmap::Clear(chunk);
   chunk->set_next_chunk(nullptr);
   chunk->set_prev_chunk(nullptr);
+  chunk->local_tracker_ = nullptr;
 
   DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
   DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
@@ -560,8 +566,7 @@
       }
     } else {
       CodeRange* code_range = heap_->memory_allocator()->code_range();
-      DCHECK(code_range != NULL && code_range->valid() &&
-             IsFlagSet(IS_EXECUTABLE));
+      DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
       if (!code_range->CommitRawMemory(start, length)) return false;
     }
 
@@ -577,8 +582,7 @@
       if (!reservation_.Uncommit(start, length)) return false;
     } else {
       CodeRange* code_range = heap_->memory_allocator()->code_range();
-      DCHECK(code_range != NULL && code_range->valid() &&
-             IsFlagSet(IS_EXECUTABLE));
+      DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
       if (!code_range->UncommitRawMemory(start, length)) return false;
     }
   }
@@ -672,10 +676,9 @@
 #ifdef V8_TARGET_ARCH_MIPS64
     // Use code range only for large object space on mips64 to keep address
     // range within 256-MB memory region.
-    if (code_range() != NULL && code_range()->valid() &&
-        reserve_area_size > CodePageAreaSize()) {
+    if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) {
 #else
-    if (code_range() != NULL && code_range()->valid()) {
+    if (code_range()->valid()) {
 #endif
       base =
           code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
@@ -727,10 +730,6 @@
       static_cast<int>(chunk_size));
 
   LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
-  if (owner != NULL) {
-    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
-    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
-  }
 
   // We cannot use the last chunk in the address space because we would
   // overflow when comparing top and limit if this chunk is used for a
@@ -762,11 +761,6 @@
 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
   DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
-  if (chunk->owner() != NULL) {
-    ObjectSpace space =
-        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
-    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
-  }
 
   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                          chunk->IsEvacuationCandidate());
@@ -915,52 +909,6 @@
   }
 }
 
-
-void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
-                                                AllocationAction action,
-                                                size_t size) {
-  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
-    MemoryAllocationCallbackRegistration registration =
-        memory_allocation_callbacks_[i];
-    if ((registration.space & space) == space &&
-        (registration.action & action) == action)
-      registration.callback(space, action, static_cast<int>(size));
-  }
-}
-
-
-bool MemoryAllocator::MemoryAllocationCallbackRegistered(
-    MemoryAllocationCallback callback) {
-  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
-    if (memory_allocation_callbacks_[i].callback == callback) return true;
-  }
-  return false;
-}
-
-
-void MemoryAllocator::AddMemoryAllocationCallback(
-    MemoryAllocationCallback callback, ObjectSpace space,
-    AllocationAction action) {
-  DCHECK(callback != NULL);
-  MemoryAllocationCallbackRegistration registration(callback, space, action);
-  DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
-  return memory_allocation_callbacks_.Add(registration);
-}
-
-
-void MemoryAllocator::RemoveMemoryAllocationCallback(
-    MemoryAllocationCallback callback) {
-  DCHECK(callback != NULL);
-  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
-    if (memory_allocation_callbacks_[i].callback == callback) {
-      memory_allocation_callbacks_.Remove(i);
-      return;
-    }
-  }
-  UNREACHABLE();
-}
-
-
 #ifdef DEBUG
 void MemoryAllocator::ReportStatistics() {
   intptr_t size = Size();
@@ -1041,6 +989,9 @@
   }
   if (old_to_new_slots_ != nullptr) ReleaseOldToNewSlots();
   if (old_to_old_slots_ != nullptr) ReleaseOldToOldSlots();
+  if (typed_old_to_new_slots_ != nullptr) ReleaseTypedOldToNewSlots();
+  if (typed_old_to_old_slots_ != nullptr) ReleaseTypedOldToOldSlots();
+  if (local_tracker_ != nullptr) ReleaseLocalTracker();
 }
 
 static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
@@ -1073,6 +1024,16 @@
   old_to_old_slots_ = nullptr;
 }
 
+void MemoryChunk::AllocateTypedOldToNewSlots() {
+  DCHECK(nullptr == typed_old_to_new_slots_);
+  typed_old_to_new_slots_ = new TypedSlotSet(address());
+}
+
+void MemoryChunk::ReleaseTypedOldToNewSlots() {
+  delete typed_old_to_new_slots_;
+  typed_old_to_new_slots_ = nullptr;
+}
+
 void MemoryChunk::AllocateTypedOldToOldSlots() {
   DCHECK(nullptr == typed_old_to_old_slots_);
   typed_old_to_old_slots_ = new TypedSlotSet(address());
@@ -1082,6 +1043,18 @@
   delete typed_old_to_old_slots_;
   typed_old_to_old_slots_ = nullptr;
 }
+
+void MemoryChunk::AllocateLocalTracker() {
+  DCHECK_NULL(local_tracker_);
+  local_tracker_ = new LocalArrayBufferTracker(heap());
+}
+
+void MemoryChunk::ReleaseLocalTracker() {
+  DCHECK_NOT_NULL(local_tracker_);
+  delete local_tracker_;
+  local_tracker_ = nullptr;
+}
+
 // -----------------------------------------------------------------------------
 // PagedSpace implementation
 
@@ -1120,9 +1093,10 @@
 
 
 void PagedSpace::TearDown() {
-  PageIterator iterator(this);
-  while (iterator.has_next()) {
-    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(iterator.next());
+  for (auto it = begin(); it != end();) {
+    Page* page = *(it++);  // Will be erased.
+    ArrayBufferTracker::FreeAll(page);
+    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
   }
   anchor_.set_next_page(&anchor_);
   anchor_.set_prev_page(&anchor_);
@@ -1178,10 +1152,8 @@
   AccountCommitted(other->CommittedMemory());
 
   // Move over pages.
-  PageIterator it(other);
-  Page* p = nullptr;
-  while (it.has_next()) {
-    p = it.next();
+  for (auto it = other->begin(); it != other->end();) {
+    Page* p = *(it++);
 
     // Relinking requires the category to be unlinked.
     other->UnlinkFreeListCategories(p);
@@ -1198,18 +1170,16 @@
   if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   size_t size = 0;
-  PageIterator it(this);
-  while (it.has_next()) {
-    size += it.next()->CommittedPhysicalMemory();
+  for (Page* page : *this) {
+    size += page->CommittedPhysicalMemory();
   }
   return size;
 }
 
 bool PagedSpace::ContainsSlow(Address addr) {
   Page* p = Page::FromAddress(addr);
-  PageIterator iterator(this);
-  while (iterator.has_next()) {
-    if (iterator.next() == p) return true;
+  for (Page* page : *this) {
+    if (page == p) return true;
   }
   return false;
 }
@@ -1233,7 +1203,6 @@
   return Smi::FromInt(0);
 }
 
-
 bool PagedSpace::Expand() {
   int size = AreaSize();
   if (snapshotable() && !HasPages()) {
@@ -1257,7 +1226,8 @@
     Bitmap::SetAllBits(p);
     p->SetFlag(Page::BLACK_PAGE);
     if (FLAG_trace_incremental_marking) {
-      PrintIsolate(heap()->isolate(), "Added black page %p\n", p);
+      PrintIsolate(heap()->isolate(), "Added black page %p\n",
+                   static_cast<void*>(p));
     }
   }
 
@@ -1270,20 +1240,17 @@
 
 
 int PagedSpace::CountTotalPages() {
-  PageIterator it(this);
   int count = 0;
-  while (it.has_next()) {
-    it.next();
+  for (Page* page : *this) {
     count++;
+    USE(page);
   }
   return count;
 }
 
 
 void PagedSpace::ResetFreeListStatistics() {
-  PageIterator page_iterator(this);
-  while (page_iterator.has_next()) {
-    Page* page = page_iterator.next();
+  for (Page* page : *this) {
     page->ResetFreeListStatistics();
   }
 }
@@ -1326,9 +1293,7 @@
 void PagedSpace::Verify(ObjectVisitor* visitor) {
   bool allocation_pointer_found_in_space =
       (allocation_info_.top() == allocation_info_.limit());
-  PageIterator page_iterator(this);
-  while (page_iterator.has_next()) {
-    Page* page = page_iterator.next();
+  for (Page* page : *this) {
     CHECK(page->owner() == this);
     if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
       allocation_pointer_found_in_space = true;
@@ -1415,7 +1380,6 @@
   from_space_.TearDown();
 }
 
-
 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
 
 
@@ -1461,6 +1425,48 @@
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 
+bool NewSpace::Rebalance() {
+  CHECK(heap()->promotion_queue()->is_empty());
+  // Order here is important to make use of the page pool.
+  return to_space_.EnsureCurrentCapacity() &&
+         from_space_.EnsureCurrentCapacity();
+}
+
+bool SemiSpace::EnsureCurrentCapacity() {
+  if (is_committed()) {
+    const int expected_pages = current_capacity_ / Page::kPageSize;
+    int actual_pages = 0;
+    Page* current_page = anchor()->next_page();
+    while (current_page != anchor()) {
+      actual_pages++;
+      current_page = current_page->next_page();
+      if (actual_pages > expected_pages) {
+        Page* to_remove = current_page->prev_page();
+        // Make sure we don't overtake the actual top pointer.
+        CHECK_NE(to_remove, current_page_);
+        to_remove->Unlink();
+        heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
+            to_remove);
+      }
+    }
+    while (actual_pages < expected_pages) {
+      actual_pages++;
+      current_page =
+          heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+              Page::kAllocatableMemory, this, executable());
+      if (current_page == nullptr) return false;
+      DCHECK_NOT_NULL(current_page);
+      current_page->InsertAfter(anchor());
+      Bitmap::Clear(current_page);
+      current_page->SetFlags(anchor()->prev_page()->GetFlags(),
+                             Page::kCopyAllFlags);
+      heap()->CreateFillerObjectAt(current_page->area_start(),
+                                   current_page->area_size(),
+                                   ClearRecordedSlots::kNo);
+    }
+  }
+  return true;
+}
 
 void LocalAllocationBuffer::Close() {
   if (IsValid()) {
@@ -1517,11 +1523,9 @@
   Address old_top = allocation_info_.top();
   to_space_.Reset();
   UpdateAllocationInfo();
-  pages_used_ = 0;
   // Clear all mark-bits in the to-space.
-  NewSpacePageIterator it(&to_space_);
-  while (it.has_next()) {
-    Bitmap::Clear(it.next());
+  for (Page* p : to_space_) {
+    Bitmap::Clear(p);
   }
   InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
 }
@@ -1563,7 +1567,6 @@
 
   int remaining_in_page = static_cast<int>(limit - top);
   heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
-  pages_used_++;
   UpdateAllocationInfo();
 
   return true;
@@ -1739,7 +1742,12 @@
 
 void SemiSpace::TearDown() {
   // Properly uncommit memory to keep the allocator counters in sync.
-  if (is_committed()) Uncommit();
+  if (is_committed()) {
+    for (Page* p : *this) {
+      ArrayBufferTracker::FreeAll(p);
+    }
+    Uncommit();
+  }
   current_capacity_ = maximum_capacity_ = 0;
 }
 
@@ -1771,10 +1779,9 @@
 
 bool SemiSpace::Uncommit() {
   DCHECK(is_committed());
-  NewSpacePageIterator it(this);
-  while (it.has_next()) {
-    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
-        it.next());
+  for (auto it = begin(); it != end();) {
+    Page* p = *(it++);
+    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(p);
   }
   anchor()->set_next_page(anchor());
   anchor()->set_prev_page(anchor());
@@ -1788,9 +1795,8 @@
 size_t SemiSpace::CommittedPhysicalMemory() {
   if (!is_committed()) return 0;
   size_t size = 0;
-  NewSpacePageIterator it(this);
-  while (it.has_next()) {
-    size += it.next()->CommittedPhysicalMemory();
+  for (Page* p : *this) {
+    size += p->CommittedPhysicalMemory();
   }
   return size;
 }
@@ -1871,9 +1877,7 @@
   anchor_.prev_page()->set_next_page(&anchor_);
   anchor_.next_page()->set_prev_page(&anchor_);
 
-  NewSpacePageIterator it(this);
-  while (it.has_next()) {
-    Page* page = it.next();
+  for (Page* page : *this) {
     page->set_owner(this);
     page->SetFlags(flags, mask);
     if (id_ == kToSpace) {
@@ -1894,23 +1898,21 @@
 void SemiSpace::Reset() {
   DCHECK_NE(anchor_.next_page(), &anchor_);
   current_page_ = anchor_.next_page();
+  pages_used_ = 0;
 }
 
-bool SemiSpace::ReplaceWithEmptyPage(Page* old_page) {
-  // TODO(mlippautz): We do not have to get a new page here when the semispace
-  // is uncommitted later on.
-  Page* new_page = heap()->memory_allocator()->AllocatePage(
-      Page::kAllocatableMemory, this, executable());
-  if (new_page == nullptr) return false;
-  Bitmap::Clear(new_page);
-  new_page->SetFlags(old_page->GetFlags(), Page::kCopyAllFlags);
-  new_page->set_next_page(old_page->next_page());
-  new_page->set_prev_page(old_page->prev_page());
-  old_page->next_page()->set_prev_page(new_page);
-  old_page->prev_page()->set_next_page(new_page);
-  heap()->CreateFillerObjectAt(new_page->area_start(), new_page->area_size(),
-                               ClearRecordedSlots::kNo);
-  return true;
+void SemiSpace::RemovePage(Page* page) {
+  if (current_page_ == page) {
+    current_page_ = page->prev_page();
+  }
+  page->Unlink();
+}
+
+void SemiSpace::PrependPage(Page* page) {
+  page->SetFlags(current_page()->GetFlags(), Page::kCopyAllFlags);
+  page->set_owner(this);
+  page->InsertAfter(anchor());
+  pages_used_++;
 }
 
 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
@@ -1938,9 +1940,8 @@
   DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
   age_mark_ = mark;
   // Mark all pages up to the one containing mark.
-  NewSpacePageIterator it(space_start(), mark);
-  while (it.has_next()) {
-    it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+  for (Page* p : NewSpacePageRange(space_start(), mark)) {
+    p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
   }
 }
 
@@ -2016,7 +2017,6 @@
   limit_ = end;
 }
 
-
 #ifdef DEBUG
 // heap_histograms is shared, always clear it before using it.
 static void ClearHistograms(Isolate* isolate) {
@@ -2034,25 +2034,22 @@
 
 
 static void ClearCodeKindStatistics(int* code_kind_statistics) {
-  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
+  for (int i = 0; i < AbstractCode::NUMBER_OF_KINDS; i++) {
     code_kind_statistics[i] = 0;
   }
 }
-
-
 static void ReportCodeKindStatistics(int* code_kind_statistics) {
   PrintF("\n   Code kind histograms: \n");
-  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
+  for (int i = 0; i < AbstractCode::NUMBER_OF_KINDS; i++) {
     if (code_kind_statistics[i] > 0) {
       PrintF("     %-20s: %10d bytes\n",
-             Code::Kind2String(static_cast<Code::Kind>(i)),
+             AbstractCode::Kind2String(static_cast<AbstractCode::Kind>(i)),
              code_kind_statistics[i]);
     }
   }
   PrintF("\n");
 }
 
-
 static int CollectHistogramInfo(HeapObject* obj) {
   Isolate* isolate = obj->GetIsolate();
   InstanceType type = obj->map()->instance_type();
@@ -2562,10 +2559,11 @@
 
 void FreeList::PrintCategories(FreeListCategoryType type) {
   FreeListCategoryIterator it(this, type);
-  PrintF("FreeList[%p, top=%p, %d] ", this, categories_[type], type);
+  PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
+         static_cast<void*>(categories_[type]), type);
   while (it.HasNext()) {
     FreeListCategory* current = it.Next();
-    PrintF("%p -> ", current);
+    PrintF("%p -> ", static_cast<void*>(current));
   }
   PrintF("null\n");
 }
@@ -2649,9 +2647,7 @@
   free_list_.RepairLists(heap());
   // Each page may have a small free space that is not tracked by a free list.
   // Update the maps for those free space objects.
-  PageIterator iterator(this);
-  while (iterator.has_next()) {
-    Page* page = iterator.next();
+  for (Page* page : *this) {
     int size = static_cast<int>(page->wasted_memory());
     if (size == 0) continue;
     Address address = page->OffsetToAddress(Page::kPageSize - size);
@@ -2748,12 +2744,15 @@
   return SweepAndRetryAllocation(size_in_bytes);
 }
 
-
 #ifdef DEBUG
 void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
   CommentStatistic* comments_statistics =
       isolate->paged_space_comments_statistics();
   ReportCodeKindStatistics(isolate->code_kind_statistics());
+  PrintF("Code size including metadata    : %10d bytes\n",
+         isolate->code_and_metadata_size());
+  PrintF("Bytecode size including metadata: %10d bytes\n",
+         isolate->bytecode_and_metadata_size());
   PrintF(
       "Code comment statistics (\"   [ comment-txt   :    size/   "
       "count  (average)\"):\n");
@@ -2767,7 +2766,6 @@
   PrintF("\n");
 }
 
-
 void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
   CommentStatistic* comments_statistics =
       isolate->paged_space_comments_statistics();
@@ -2843,40 +2841,28 @@
   EnterComment(isolate, comment_txt, flat_delta);
 }
 
-
-// Collects code size statistics:
-// - by code kind
-// - by code comment
-void PagedSpace::CollectCodeStatistics() {
-  Isolate* isolate = heap()->isolate();
-  HeapObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
-    if (obj->IsAbstractCode()) {
-      AbstractCode* code = AbstractCode::cast(obj);
-      isolate->code_kind_statistics()[code->kind()] += code->Size();
-    }
-    if (obj->IsCode()) {
-      // TODO(mythria): Also enable this for BytecodeArray when it supports
-      // RelocInformation.
-      Code* code = Code::cast(obj);
-      RelocIterator it(code);
-      int delta = 0;
-      const byte* prev_pc = code->instruction_start();
-      while (!it.done()) {
-        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
-          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
-          CollectCommentStatistics(isolate, &it);
-          prev_pc = it.rinfo()->pc();
-        }
-        it.next();
-      }
-
-      DCHECK(code->instruction_start() <= prev_pc &&
-             prev_pc <= code->instruction_end());
-      delta += static_cast<int>(code->instruction_end() - prev_pc);
-      EnterComment(isolate, "NoComment", delta);
-    }
+// Collects code comment statistics
+static void CollectCodeCommentStatistics(HeapObject* obj, Isolate* isolate) {
+  if (!obj->IsCode()) {
+    return;
   }
+  Code* code = Code::cast(obj);
+  RelocIterator it(code);
+  int delta = 0;
+  const byte* prev_pc = code->instruction_start();
+  while (!it.done()) {
+    if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
+      delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
+      CollectCommentStatistics(isolate, &it);
+      prev_pc = it.rinfo()->pc();
+    }
+    it.next();
+  }
+
+  DCHECK(code->instruction_start() <= prev_pc &&
+         prev_pc <= code->instruction_end());
+  delta += static_cast<int>(code->instruction_end() - prev_pc);
+  EnterComment(isolate, "NoComment", delta);
 }
 
 
@@ -2897,6 +2883,44 @@
 }
 #endif
 
+static void RecordCodeSizeIncludingMetadata(AbstractCode* abstract_code,
+                                            Isolate* isolate) {
+  int size = abstract_code->SizeIncludingMetadata();
+  if (abstract_code->IsCode()) {
+    size += isolate->code_and_metadata_size();
+    isolate->set_code_and_metadata_size(size);
+  } else {
+    size += isolate->bytecode_and_metadata_size();
+    isolate->set_bytecode_and_metadata_size(size);
+  }
+}
+
+// Collects code size statistics:
+// - code and metadata size
+// - by code kind (only in debug mode)
+// - by code comment (only in debug mode)
+void PagedSpace::CollectCodeStatistics() {
+  Isolate* isolate = heap()->isolate();
+  HeapObjectIterator obj_it(this);
+  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
+    if (obj->IsAbstractCode()) {
+      AbstractCode* code = AbstractCode::cast(obj);
+      RecordCodeSizeIncludingMetadata(code, isolate);
+#ifdef DEBUG
+      isolate->code_kind_statistics()[code->kind()] += code->Size();
+      CollectCodeCommentStatistics(obj, isolate);
+#endif
+    }
+  }
+}
+
+void PagedSpace::ResetCodeAndMetadataStatistics(Isolate* isolate) {
+  isolate->set_code_and_metadata_size(0);
+  isolate->set_bytecode_and_metadata_size(0);
+#ifdef DEBUG
+  ResetCodeStatistics(isolate);
+#endif
+}
 
 // -----------------------------------------------------------------------------
 // MapSpace implementation
@@ -2926,15 +2950,13 @@
 // -----------------------------------------------------------------------------
 // LargeObjectSpace
 
-
 LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
       first_page_(NULL),
       size_(0),
       page_count_(0),
       objects_size_(0),
-      chunk_map_(HashMap::PointersMatch, 1024) {}
-
+      chunk_map_(base::HashMap::PointersMatch, 1024) {}
 
 LargeObjectSpace::~LargeObjectSpace() {}
 
@@ -2954,10 +2976,6 @@
     LargePage* page = first_page_;
     first_page_ = first_page_->next_page();
     LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
-
-    ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
-    heap()->memory_allocator()->PerformAllocationCallback(
-        space, kAllocationActionFree, page->size());
     heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
   }
   SetUp();
@@ -2989,7 +3007,7 @@
   uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
   uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
   for (uintptr_t key = base; key <= limit; key++) {
-    HashMap::Entry* entry = chunk_map_.LookupOrInsert(
+    base::HashMap::Entry* entry = chunk_map_.LookupOrInsert(
         reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
     DCHECK(entry != NULL);
     entry->value = page;
@@ -3013,14 +3031,10 @@
 
 
 size_t LargeObjectSpace::CommittedPhysicalMemory() {
-  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
-  size_t size = 0;
-  LargePage* current = first_page_;
-  while (current != NULL) {
-    size += current->CommittedPhysicalMemory();
-    current = current->next_page();
-  }
-  return size;
+  // On a platform that provides lazy committing of memory, we over-account
+  // the actually committed memory. There is no easy way right now to support
+  // precise accounting of committed memory in large object space.
+  return CommittedMemory();
 }
 
 
@@ -3036,8 +3050,8 @@
 
 LargePage* LargeObjectSpace::FindPage(Address a) {
   uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
-  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
-                                        static_cast<uint32_t>(key));
+  base::HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
+                                              static_cast<uint32_t>(key));
   if (e != NULL) {
     DCHECK(e->value != NULL);
     LargePage* page = reinterpret_cast<LargePage*>(e->value);
@@ -3167,6 +3181,20 @@
 }
 #endif
 
+void LargeObjectSpace::CollectCodeStatistics() {
+  Isolate* isolate = heap()->isolate();
+  LargeObjectIterator obj_it(this);
+  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
+    if (obj->IsAbstractCode()) {
+      AbstractCode* code = AbstractCode::cast(obj);
+      RecordCodeSizeIncludingMetadata(code, isolate);
+#ifdef DEBUG
+      isolate->code_kind_statistics()[code->kind()] += code->Size();
+      CollectCodeCommentStatistics(obj, isolate);
+#endif
+    }
+  }
+}
 
 #ifdef DEBUG
 void LargeObjectSpace::Print() {
@@ -3196,21 +3224,9 @@
 }
 
 
-void LargeObjectSpace::CollectCodeStatistics() {
-  Isolate* isolate = heap()->isolate();
-  LargeObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
-    if (obj->IsAbstractCode()) {
-      AbstractCode* code = AbstractCode::cast(obj);
-      isolate->code_kind_statistics()[code->kind()] += code->Size();
-    }
-  }
-}
-
-
 void Page::Print() {
   // Make a best-effort to print the objects in the page.
-  PrintF("Page@%p in %s\n", this->address(),
+  PrintF("Page@%p in %s\n", static_cast<void*>(this->address()),
          AllocationSpaceName(this->owner()->identity()));
   printf(" --------------------------------------\n");
   HeapObjectIterator objects(this);
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index 67e9aae..04c89a8 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -11,9 +11,9 @@
 #include "src/base/atomic-utils.h"
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
+#include "src/base/hashmap.h"
 #include "src/base/platform/mutex.h"
 #include "src/flags.h"
-#include "src/hashmap.h"
 #include "src/list.h"
 #include "src/objects.h"
 #include "src/utils.h"
@@ -27,6 +27,7 @@
 class CompactionSpaceCollection;
 class FreeList;
 class Isolate;
+class LocalArrayBufferTracker;
 class MemoryAllocator;
 class MemoryChunk;
 class Page;
@@ -424,6 +425,10 @@
     // from new to old space during evacuation.
     PAGE_NEW_OLD_PROMOTION,
 
+    // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
+    // within the new space during evacuation.
+    PAGE_NEW_NEW_PROMOTION,
+
     // A black page has all mark bits set to 1 (black). A black page currently
     // cannot be iterated because it is not swept. Moreover live bytes are also
     // not updated.
@@ -450,6 +455,11 @@
     //   has been aborted and needs special handling by the sweeper.
     COMPACTION_WAS_ABORTED,
 
+    // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
+    // on pages is sometimes aborted. The flag is used to avoid repeatedly
+    // triggering on the same page.
+    COMPACTION_WAS_ABORTED_FOR_TESTING,
+
     // |ANCHOR|: Flag is set if page is an anchor.
     ANCHOR,
 
@@ -509,6 +519,7 @@
   static const size_t kWriteBarrierCounterOffset =
       kOldToNewSlotsOffset + kPointerSize  // SlotSet* old_to_new_slots_;
       + kPointerSize                       // SlotSet* old_to_old_slots_;
+      + kPointerSize   // TypedSlotSet* typed_old_to_new_slots_;
       + kPointerSize   // TypedSlotSet* typed_old_to_old_slots_;
       + kPointerSize;  // SkipList* skip_list_;
 
@@ -522,7 +533,8 @@
       + kPointerSize      // AtomicValue next_chunk_
       + kPointerSize      // AtomicValue prev_chunk_
       // FreeListCategory categories_[kNumberOfCategories]
-      + FreeListCategory::kSize * kNumberOfCategories;
+      + FreeListCategory::kSize * kNumberOfCategories +
+      kPointerSize;  // LocalArrayBufferTracker* local_tracker_;
 
   // We add some more space to the computed header size to amount for missing
   // alignment requirements in our computation.
@@ -625,16 +637,24 @@
 
   inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
   inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
+  inline TypedSlotSet* typed_old_to_new_slots() {
+    return typed_old_to_new_slots_;
+  }
   inline TypedSlotSet* typed_old_to_old_slots() {
     return typed_old_to_old_slots_;
   }
+  inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
 
   void AllocateOldToNewSlots();
   void ReleaseOldToNewSlots();
   void AllocateOldToOldSlots();
   void ReleaseOldToOldSlots();
+  void AllocateTypedOldToNewSlots();
+  void ReleaseTypedOldToNewSlots();
   void AllocateTypedOldToOldSlots();
   void ReleaseTypedOldToOldSlots();
+  void AllocateLocalTracker();
+  void ReleaseLocalTracker();
 
   Address area_start() { return area_start_; }
   Address area_end() { return area_end_; }
@@ -645,6 +665,8 @@
   // Approximate amount of physical memory committed for this chunk.
   size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
 
+  Address HighWaterMark() { return address() + high_water_mark_.Value(); }
+
   int progress_bar() {
     DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
     return progress_bar_;
@@ -707,7 +729,8 @@
   }
 
   bool ShouldSkipEvacuationSlotRecording() {
-    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
+    return ((flags_ & kSkipEvacuationSlotsRecordingMask) != 0) &&
+           !IsFlagSet(COMPACTION_WAS_ABORTED);
   }
 
   Executability executable() {
@@ -792,6 +815,7 @@
   // is ceil(size() / kPageSize).
   SlotSet* old_to_new_slots_;
   SlotSet* old_to_old_slots_;
+  TypedSlotSet* typed_old_to_new_slots_;
   TypedSlotSet* typed_old_to_old_slots_;
 
   SkipList* skip_list_;
@@ -817,6 +841,8 @@
 
   FreeListCategory categories_[kNumberOfCategories];
 
+  LocalArrayBufferTracker* local_tracker_;
+
  private:
   void InitializeReservedMemory() { reservation_.Reset(); }
 
@@ -1154,15 +1180,6 @@
   void FreeRawMemory(Address buf, size_t length);
 
  private:
-  // Frees the range of virtual memory, and frees the data structures used to
-  // manage it.
-  void TearDown();
-
-  Isolate* isolate_;
-
-  // The reserved range of virtual memory that all code objects are put in.
-  base::VirtualMemory* code_range_;
-  // Plain old data class, just a struct plus a constructor.
   class FreeBlock {
    public:
     FreeBlock() : start(0), size(0) {}
@@ -1181,6 +1198,26 @@
     size_t size;
   };
 
+  // Frees the range of virtual memory, and frees the data structures used to
+  // manage it.
+  void TearDown();
+
+  // Finds a block on the allocation list that contains at least the
+  // requested amount of memory.  If none is found, sorts and merges
+  // the existing free memory blocks, and searches again.
+  // If none can be found, returns false.
+  bool GetNextAllocationBlock(size_t requested);
+  // Compares the start addresses of two free blocks.
+  static int CompareFreeBlockAddress(const FreeBlock* left,
+                                     const FreeBlock* right);
+  bool ReserveBlock(const size_t requested_size, FreeBlock* block);
+  void ReleaseBlock(const FreeBlock* block);
+
+  Isolate* isolate_;
+
+  // The reserved range of virtual memory that all code objects are put in.
+  base::VirtualMemory* code_range_;
+
   // The global mutex guards free_list_ and allocation_list_ as GC threads may
   // access both lists concurrently to the main thread.
   base::Mutex code_range_mutex_;
@@ -1195,17 +1232,6 @@
   List<FreeBlock> allocation_list_;
   int current_allocation_block_index_;
 
-  // Finds a block on the allocation list that contains at least the
-  // requested amount of memory.  If none is found, sorts and merges
-  // the existing free memory blocks, and searches again.
-  // If none can be found, returns false.
-  bool GetNextAllocationBlock(size_t requested);
-  // Compares the start addresses of two free blocks.
-  static int CompareFreeBlockAddress(const FreeBlock* left,
-                                     const FreeBlock* right);
-  bool ReserveBlock(const size_t requested_size, FreeBlock* block);
-  void ReleaseBlock(const FreeBlock* block);
-
   DISALLOW_COPY_AND_ASSIGN(CodeRange);
 };
 
@@ -1321,7 +1347,12 @@
     template <ChunkQueueType type>
     void AddMemoryChunkSafe(MemoryChunk* chunk) {
       base::LockGuard<base::Mutex> guard(&mutex_);
-      chunks_[type].push_back(chunk);
+      if (type != kRegular || allocator_->CanFreeMemoryChunk(chunk)) {
+        chunks_[type].push_back(chunk);
+      } else {
+        DCHECK_EQ(type, kRegular);
+        delayed_regular_chunks_.push_back(chunk);
+      }
     }
 
     template <ChunkQueueType type>
@@ -1333,11 +1364,16 @@
       return chunk;
     }
 
+    void ReconsiderDelayedChunks();
     void PerformFreeMemoryOnQueuedChunks();
 
     base::Mutex mutex_;
     MemoryAllocator* allocator_;
     std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues];
+    // Delayed chunks cannot be processed in the current unmapping cycle because
+    // of dependencies such as an active sweeper.
+    // See MemoryAllocator::CanFreeMemoryChunk.
+    std::list<MemoryChunk*> delayed_regular_chunks_;
     base::Semaphore pending_unmapping_tasks_semaphore_;
     intptr_t concurrent_unmapping_tasks_active_;
 
@@ -1376,6 +1412,8 @@
   template <MemoryAllocator::FreeMode mode = kFull>
   void Free(MemoryChunk* chunk);
 
+  bool CanFreeMemoryChunk(MemoryChunk* chunk);
+
   // Returns allocated spaces in bytes.
   intptr_t Size() { return size_.Value(); }
 
@@ -1446,16 +1484,6 @@
   // filling it up with a recognizable non-NULL bit pattern.
   void ZapBlock(Address start, size_t size);
 
-  void PerformAllocationCallback(ObjectSpace space, AllocationAction action,
-                                 size_t size);
-
-  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
-                                   ObjectSpace space, AllocationAction action);
-
-  void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
-
-  bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
-
   static int CodePageGuardStartOffset();
 
   static int CodePageGuardSize();
@@ -1516,19 +1544,6 @@
   base::AtomicValue<void*> lowest_ever_allocated_;
   base::AtomicValue<void*> highest_ever_allocated_;
 
-  struct MemoryAllocationCallbackRegistration {
-    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
-                                         ObjectSpace space,
-                                         AllocationAction action)
-        : callback(callback), space(space), action(action) {}
-    MemoryAllocationCallback callback;
-    ObjectSpace space;
-    AllocationAction action;
-  };
-
-  // A List of callback that are triggered when memory is allocated or free'd
-  List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;
-
   // Initializes pages in a chunk. Returns the first page address.
   // This function and GetChunkId() are provided for the mark-compact
   // collector to rebuild page headers in the from space, which is
@@ -1569,10 +1584,44 @@
 class ObjectIterator : public Malloced {
  public:
   virtual ~ObjectIterator() {}
-
-  virtual HeapObject* next_object() = 0;
+  virtual HeapObject* Next() = 0;
 };
 
+template <class PAGE_TYPE>
+class PageIteratorImpl
+    : public std::iterator<std::forward_iterator_tag, PAGE_TYPE> {
+ public:
+  explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
+  PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
+  PAGE_TYPE* operator*() { return p_; }
+  bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
+    return rhs.p_ == p_;
+  }
+  bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
+    return rhs.p_ != p_;
+  }
+  inline PageIteratorImpl<PAGE_TYPE>& operator++();
+  inline PageIteratorImpl<PAGE_TYPE> operator++(int);
+
+ private:
+  PAGE_TYPE* p_;
+};
+
+typedef PageIteratorImpl<Page> PageIterator;
+typedef PageIteratorImpl<LargePage> LargePageIterator;
+
+class PageRange {
+ public:
+  typedef PageIterator iterator;
+  PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
+  explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
+  iterator begin() { return iterator(begin_); }
+  iterator end() { return iterator(end_); }
+
+ private:
+  Page* begin_;
+  Page* end_;
+};
 
 // -----------------------------------------------------------------------------
 // Heap object iterator in new/old/map spaces.
@@ -1591,18 +1640,10 @@
 
   // Advance to the next object, skipping free spaces and other fillers and
   // skipping the special garbage section of which there is one per space.
-  // Returns NULL when the iteration has ended.
-  inline HeapObject* Next();
-  inline HeapObject* next_object() override;
+  // Returns nullptr when the iteration has ended.
+  inline HeapObject* Next() override;
 
  private:
-  enum PageMode { kOnePageOnly, kAllPagesInSpace };
-
-  Address cur_addr_;              // Current iteration point.
-  Address cur_end_;               // End iteration point.
-  PagedSpace* space_;
-  PageMode page_mode_;
-
   // Fast (inlined) path of next().
   inline HeapObject* FromCurrentPage();
 
@@ -1610,28 +1651,11 @@
   // iteration has ended.
   bool AdvanceToNextPage();
 
-  // Initializes fields.
-  inline void Initialize(PagedSpace* owner, Address start, Address end,
-                         PageMode mode);
-};
-
-
-// -----------------------------------------------------------------------------
-// A PageIterator iterates the pages in a paged space.
-
-class PageIterator BASE_EMBEDDED {
- public:
-  explicit inline PageIterator(PagedSpace* space);
-
-  inline bool has_next();
-  inline Page* next();
-
- private:
+  Address cur_addr_;  // Current iteration point.
+  Address cur_end_;   // End iteration point.
   PagedSpace* space_;
-  Page* prev_page_;  // Previous page returned.
-  // Next page that will be returned.  Cached here so that we can use this
-  // iterator for operations that deallocate pages.
-  Page* next_page_;
+  PageRange page_range_;
+  PageRange::iterator current_page_;
 };
 
 
@@ -2083,8 +2107,21 @@
   AllocationInfo allocation_info_;
 };
 
+class NewSpacePageRange {
+ public:
+  typedef PageRange::iterator iterator;
+  inline NewSpacePageRange(Address start, Address limit);
+  iterator begin() { return range_.begin(); }
+  iterator end() { return range_.end(); }
+
+ private:
+  PageRange range_;
+};
+
 class PagedSpace : public Space {
  public:
+  typedef PageIterator iterator;
+
   static const intptr_t kCompactionMemoryWanted = 500 * KB;
 
   // Creates a space with an id.
@@ -2236,6 +2273,12 @@
   // The dummy page that anchors the linked list of pages.
   Page* anchor() { return &anchor_; }
 
+  // Collect code size related statistics
+  void CollectCodeStatistics();
+
+  // Reset code size related statistics
+  static void ResetCodeAndMetadataStatistics(Isolate* isolate);
+
 #ifdef VERIFY_HEAP
   // Verify integrity of this space.
   virtual void Verify(ObjectVisitor* visitor);
@@ -2253,7 +2296,6 @@
   void ReportStatistics();
 
   // Report code object related statistics
-  void CollectCodeStatistics();
   static void ReportCodeStatistics(Isolate* isolate);
   static void ResetCodeStatistics(Isolate* isolate);
 #endif
@@ -2288,6 +2330,9 @@
   inline void UnlinkFreeListCategories(Page* page);
   inline intptr_t RelinkFreeListCategories(Page* page);
 
+  iterator begin() { return iterator(anchor_.next_page()); }
+  iterator end() { return iterator(&anchor_); }
+
  protected:
   // PagedSpaces that should be included in snapshots have different, i.e.,
   // smaller, initial pages.
@@ -2342,7 +2387,6 @@
 
   friend class IncrementalMarking;
   friend class MarkCompactCollector;
-  friend class PageIterator;
 
   // Used in cctest.
   friend class HeapTester;
@@ -2393,6 +2437,8 @@
 // space as a marking stack when tracing live objects.
 class SemiSpace : public Space {
  public:
+  typedef PageIterator iterator;
+
   static void Swap(SemiSpace* from, SemiSpace* to);
 
   SemiSpace(Heap* heap, SemiSpaceId semispace)
@@ -2404,7 +2450,8 @@
         committed_(false),
         id_(semispace),
         anchor_(this),
-        current_page_(nullptr) {}
+        current_page_(nullptr),
+        pages_used_(0) {}
 
   inline bool Contains(HeapObject* o);
   inline bool Contains(Object* o);
@@ -2427,6 +2474,8 @@
   // than the current capacity.
   bool ShrinkTo(int new_capacity);
 
+  bool EnsureCurrentCapacity();
+
   // Returns the start address of the first page of the space.
   Address space_start() {
     DCHECK_NE(anchor_.next_page(), anchor());
@@ -2435,6 +2484,7 @@
 
   Page* first_page() { return anchor_.next_page(); }
   Page* current_page() { return current_page_; }
+  int pages_used() { return pages_used_; }
 
   // Returns one past the end address of the space.
   Address space_end() { return anchor_.prev_page()->area_end(); }
@@ -2447,15 +2497,23 @@
 
   bool AdvancePage() {
     Page* next_page = current_page_->next_page();
-    if (next_page == anchor()) return false;
+    // We cannot expand if we reached the maximum number of pages already. Note
+    // that we need to account for the next page already for this check as we
+    // could potentially fill the whole page after advancing.
+    const bool reached_max_pages = (pages_used_ + 1) == max_pages();
+    if (next_page == anchor() || reached_max_pages) {
+      return false;
+    }
     current_page_ = next_page;
+    pages_used_++;
     return true;
   }
 
   // Resets the space to using the first page.
   void Reset();
 
-  bool ReplaceWithEmptyPage(Page* page);
+  void RemovePage(Page* page);
+  void PrependPage(Page* page);
 
   // Age mark accessors.
   Address age_mark() { return age_mark_; }
@@ -2505,10 +2563,14 @@
   virtual void Verify();
 #endif
 
+  iterator begin() { return iterator(anchor_.next_page()); }
+  iterator end() { return iterator(anchor()); }
+
  private:
   void RewindPages(Page* start, int num_pages);
 
   inline Page* anchor() { return &anchor_; }
+  inline int max_pages() { return current_capacity_ / Page::kPageSize; }
 
   // Copies the flags into the masked positions on all pages in the space.
   void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
@@ -2516,7 +2578,8 @@
   // The currently committed space capacity.
   int current_capacity_;
 
-  // The maximum capacity that can be used by this space.
+  // The maximum capacity that can be used by this space. A space cannot grow
+  // beyond that size.
   int maximum_capacity_;
 
   // The minimum capacity for the space. A space cannot shrink below this size.
@@ -2530,9 +2593,10 @@
 
   Page anchor_;
   Page* current_page_;
+  int pages_used_;
 
+  friend class NewSpace;
   friend class SemiSpaceIterator;
-  friend class NewSpacePageIterator;
 };
 
 
@@ -2546,10 +2610,7 @@
   // Create an iterator over the allocated objects in the given to-space.
   explicit SemiSpaceIterator(NewSpace* space);
 
-  inline HeapObject* Next();
-
-  // Implementation of the ObjectIterator functions.
-  inline HeapObject* next_object() override;
+  inline HeapObject* Next() override;
 
  private:
   void Initialize(Address start, Address end);
@@ -2560,35 +2621,6 @@
   Address limit_;
 };
 
-
-// -----------------------------------------------------------------------------
-// A PageIterator iterates the pages in a semi-space.
-class NewSpacePageIterator BASE_EMBEDDED {
- public:
-  // Make an iterator that runs over all pages in to-space.
-  explicit inline NewSpacePageIterator(NewSpace* space);
-
-  // Make an iterator that runs over all pages in the given semispace,
-  // even those not used in allocation.
-  explicit inline NewSpacePageIterator(SemiSpace* space);
-
-  // Make iterator that iterates from the page containing start
-  // to the page that contains limit in the same semispace.
-  inline NewSpacePageIterator(Address start, Address limit);
-
-  inline bool has_next();
-  inline Page* next();
-
- private:
-  Page* prev_page_;  // Previous page returned.
-  // Next page that will be returned.  Cached here so that we can use this
-  // iterator for operations that deallocate pages.
-  Page* next_page_;
-  // Last page returned.
-  Page* last_page_;
-};
-
-
 // -----------------------------------------------------------------------------
 // The young generation space.
 //
@@ -2597,12 +2629,13 @@
 
 class NewSpace : public Space {
  public:
+  typedef PageIterator iterator;
+
   explicit NewSpace(Heap* heap)
       : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
         to_space_(heap, kToSpace),
         from_space_(heap, kFromSpace),
         reservation_(),
-        pages_used_(0),
         top_on_previous_step_(0),
         allocated_histogram_(nullptr),
         promoted_histogram_(nullptr) {}
@@ -2634,7 +2667,7 @@
 
   // Return the allocated bytes in the active semispace.
   intptr_t Size() override {
-    return pages_used_ * Page::kAllocatableMemory +
+    return to_space_.pages_used() * Page::kAllocatableMemory +
            static_cast<int>(top() - to_space_.page_low());
   }
 
@@ -2711,12 +2744,14 @@
     return static_cast<size_t>(allocated);
   }
 
-  bool ReplaceWithEmptyPage(Page* page) {
-    // This method is called after flipping the semispace.
+  void MovePageFromSpaceToSpace(Page* page) {
     DCHECK(page->InFromSpace());
-    return from_space_.ReplaceWithEmptyPage(page);
+    from_space_.RemovePage(page);
+    to_space_.PrependPage(page);
   }
 
+  bool Rebalance();
+
   // Return the maximum capacity of a semispace.
   int MaximumCapacity() {
     DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
@@ -2859,6 +2894,9 @@
   void PauseAllocationObservers() override;
   void ResumeAllocationObservers() override;
 
+  iterator begin() { return to_space_.begin(); }
+  iterator end() { return to_space_.end(); }
+
  private:
   // Update allocation info to match the current to-space page.
   void UpdateAllocationInfo();
@@ -2869,7 +2907,6 @@
   SemiSpace to_space_;
   SemiSpace from_space_;
   base::VirtualMemory reservation_;
-  int pages_used_;
 
   // Allocation pointer and limit for normal allocation and allocation during
   // mark-compact collection.
@@ -3003,6 +3040,8 @@
 
 class LargeObjectSpace : public Space {
  public:
+  typedef LargePageIterator iterator;
+
   LargeObjectSpace(Heap* heap, AllocationSpace id);
   virtual ~LargeObjectSpace();
 
@@ -3061,6 +3100,12 @@
 
   LargePage* first_page() { return first_page_; }
 
+  // Collect code statistics.
+  void CollectCodeStatistics();
+
+  iterator begin() { return iterator(first_page_); }
+  iterator end() { return iterator(nullptr); }
+
 #ifdef VERIFY_HEAP
   virtual void Verify();
 #endif
@@ -3068,7 +3113,6 @@
 #ifdef DEBUG
   void Print() override;
   void ReportStatistics();
-  void CollectCodeStatistics();
 #endif
 
  private:
@@ -3078,7 +3122,7 @@
   int page_count_;         // number of chunks
   intptr_t objects_size_;  // size of objects
   // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
-  HashMap chunk_map_;
+  base::HashMap chunk_map_;
 
   friend class LargeObjectIterator;
 };
@@ -3088,31 +3132,17 @@
  public:
   explicit LargeObjectIterator(LargeObjectSpace* space);
 
-  HeapObject* Next();
-
-  // implementation of ObjectIterator.
-  virtual HeapObject* next_object() { return Next(); }
+  HeapObject* Next() override;
 
  private:
   LargePage* current_;
 };
 
-class LargePageIterator BASE_EMBEDDED {
- public:
-  explicit inline LargePageIterator(LargeObjectSpace* space);
-
-  inline LargePage* next();
-
- private:
-  LargePage* next_page_;
-};
-
 // Iterates over the chunks (pages and large object pages) that can contain
 // pointers to new space or to evacuation candidates.
 class MemoryChunkIterator BASE_EMBEDDED {
  public:
-  enum Mode { ALL, ALL_BUT_MAP_SPACE, ALL_BUT_CODE_SPACE };
-  inline explicit MemoryChunkIterator(Heap* heap, Mode mode);
+  inline explicit MemoryChunkIterator(Heap* heap);
 
   // Return NULL when the iterator is done.
   inline MemoryChunk* next();
@@ -3125,8 +3155,8 @@
     kLargeObjectState,
     kFinishedState
   };
+  Heap* heap_;
   State state_;
-  const Mode mode_;
   PageIterator old_iterator_;
   PageIterator code_iterator_;
   PageIterator map_iterator_;
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index fdf11c1..925ae48 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -191,36 +191,24 @@
   return Memory::Address_at(pc_);
 }
 
+Address RelocInfo::wasm_global_reference() {
+  DCHECK(IsWasmGlobalReference(rmode_));
+  return Memory::Address_at(pc_);
+}
+
 uint32_t RelocInfo::wasm_memory_size_reference() {
   DCHECK(IsWasmMemorySizeReference(rmode_));
   return Memory::uint32_at(pc_);
 }
 
-void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
-    ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
-  if (IsWasmMemoryReference(rmode_)) {
-    Address updated_reference;
-    DCHECK(old_base <= wasm_memory_reference() &&
-           wasm_memory_reference() < old_base + old_size);
-    updated_reference = new_base + (wasm_memory_reference() - old_base);
-    DCHECK(new_base <= updated_reference &&
-           updated_reference < new_base + new_size);
-    Memory::Address_at(pc_) = updated_reference;
-  } else if (IsWasmMemorySizeReference(rmode_)) {
-    uint32_t updated_size_reference;
-    DCHECK(wasm_memory_size_reference() <= old_size);
-    updated_size_reference =
-        new_size + (wasm_memory_size_reference() - old_size);
-    DCHECK(updated_size_reference <= new_size);
-    Memory::uint32_at(pc_) = updated_size_reference;
-  } else {
-    UNREACHABLE();
-  }
-  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    Assembler::FlushICache(isolate_, pc_, sizeof(int32_t));
-  }
+void RelocInfo::unchecked_update_wasm_memory_reference(
+    Address address, ICacheFlushMode flush_mode) {
+  Memory::Address_at(pc_) = address;
+}
+
+void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
+                                                  ICacheFlushMode flush_mode) {
+  Memory::uint32_at(pc_) = size;
 }
 
 // -----------------------------------------------------------------------------
@@ -344,6 +332,8 @@
   desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
   desc->origin = this;
   desc->constant_pool_size = 0;
+  desc->unwinding_info_size = 0;
+  desc->unwinding_info = nullptr;
 }
 
 
@@ -730,6 +720,33 @@
   emit_operand(reg, op);
 }
 
+void Assembler::lock() {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF0);
+}
+
+void Assembler::cmpxchg(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0xB1);
+  emit_operand(src, dst);
+}
+
+void Assembler::cmpxchg_b(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0xB0);
+  emit_operand(src, dst);
+}
+
+void Assembler::cmpxchg_w(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0xB1);
+  emit_operand(src, dst);
+}
+
 void Assembler::adc(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
   emit_arith(2, Operand(dst), Immediate(imm32));
@@ -1516,7 +1533,6 @@
 
 
 void Assembler::call(Label* L) {
-  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   if (L->is_bound()) {
     const int long_size = 5;
@@ -1534,7 +1550,6 @@
 
 
 void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
-  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   DCHECK(!RelocInfo::IsCodeTarget(rmode));
   EMIT(0xE8);
@@ -1553,7 +1568,6 @@
 
 
 void Assembler::call(const Operand& adr) {
-  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   EMIT(0xFF);
   emit_operand(edx, adr);
@@ -1568,7 +1582,6 @@
 void Assembler::call(Handle<Code> code,
                      RelocInfo::Mode rmode,
                      TypeFeedbackId ast_id) {
-  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   DCHECK(RelocInfo::IsCodeTarget(rmode)
       || rmode == RelocInfo::CODE_AGE_SEQUENCE);
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index c3edacb..4e542d7 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -124,8 +124,6 @@
     Register r = {code};
     return r;
   }
-  const char* ToString();
-  bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
   bool is(Register reg) const { return reg_code == reg.reg_code; }
   int code() const {
@@ -149,6 +147,8 @@
 #undef DECLARE_REGISTER
 const Register no_reg = {Register::kCode_no_reg};
 
+static const bool kSimpleFPAliasing = true;
+
 struct XMMRegister {
   enum Code {
 #define REGISTER_CODE(R) kCode_##R,
@@ -165,7 +165,6 @@
     return result;
   }
 
-  bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
 
   int code() const {
@@ -175,8 +174,6 @@
 
   bool is(XMMRegister reg) const { return reg_code == reg.reg_code; }
 
-  const char* ToString();
-
   int reg_code;
 };
 
@@ -662,6 +659,14 @@
   void xchg_b(Register reg, const Operand& op);
   void xchg_w(Register reg, const Operand& op);
 
+  // Lock prefix
+  void lock();
+
+  // CompareExchange
+  void cmpxchg(const Operand& dst, Register src);
+  void cmpxchg_b(const Operand& dst, Register src);
+  void cmpxchg_w(const Operand& dst, Register src);
+
   // Arithmetics
   void adc(Register dst, int32_t imm32);
   void adc(Register dst, const Operand& src);
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 232c56b..96b2787 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -16,10 +16,7 @@
 
 #define __ ACCESS_MASM(masm)
 
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
-                                CFunctionId id,
-                                BuiltinExtraArguments extra_args) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
   // ----------- S t a t e -------------
   //  -- eax                : number of arguments excluding receiver
   //  -- edi                : target
@@ -39,19 +36,11 @@
   __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   // Insert extra arguments.
-  int num_extra_args = 0;
-  if (extra_args != BuiltinExtraArguments::kNone) {
-    __ PopReturnAddressTo(ecx);
-    if (extra_args & BuiltinExtraArguments::kTarget) {
-      ++num_extra_args;
-      __ Push(edi);
-    }
-    if (extra_args & BuiltinExtraArguments::kNewTarget) {
-      ++num_extra_args;
-      __ Push(edx);
-    }
-    __ PushReturnAddressFrom(ecx);
-  }
+  const int num_extra_args = 2;
+  __ PopReturnAddressTo(ecx);
+  __ Push(edi);
+  __ Push(edx);
+  __ PushReturnAddressFrom(ecx);
 
   // JumpToExternalReference expects eax to contain the number of arguments
   // including the receiver and the extra arguments.
@@ -395,8 +384,8 @@
   __ AssertGeneratorObject(ebx);
 
   // Store input value into generator object.
-  __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOffset), eax);
-  __ RecordWriteField(ebx, JSGeneratorObject::kInputOffset, eax, ecx,
+  __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOrDebugPosOffset), eax);
+  __ RecordWriteField(ebx, JSGeneratorObject::kInputOrDebugPosOffset, eax, ecx,
                       kDontSaveFPRegs);
 
   // Store resume mode into generator object.
@@ -407,22 +396,20 @@
   __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
 
   // Flood function if we are stepping.
-  Label skip_flooding;
-  ExternalReference step_in_enabled =
-      ExternalReference::debug_step_in_enabled_address(masm->isolate());
-  __ cmpb(Operand::StaticVariable(step_in_enabled), Immediate(0));
-  __ j(equal, &skip_flooding);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Push(ebx);
-    __ Push(edx);
-    __ Push(edi);
-    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
-    __ Pop(edx);
-    __ Pop(ebx);
-    __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
-  }
-  __ bind(&skip_flooding);
+  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+  Label stepping_prepared;
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(masm->isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  __ cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
+  __ j(greater_equal, &prepare_step_in_if_stepping);
+
+  // Flood function if we need to continue stepping in the suspended generator.
+  ExternalReference debug_suspended_generator =
+      ExternalReference::debug_suspended_generator_address(masm->isolate());
+  __ cmp(ebx, Operand::StaticVariable(debug_suspended_generator));
+  __ j(equal, &prepare_step_in_suspended_generator);
+  __ bind(&stepping_prepared);
 
   // Pop return address.
   __ PopReturnAddressTo(eax);
@@ -518,6 +505,51 @@
     __ mov(eax, ebx);  // Continuation expects generator object in eax.
     __ jmp(edx);
   }
+
+  __ bind(&prepare_step_in_if_stepping);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(ebx);
+    __ Push(edx);
+    __ Push(edi);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(edx);
+    __ Pop(ebx);
+    __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+  }
+  __ jmp(&stepping_prepared);
+
+  __ bind(&prepare_step_in_suspended_generator);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(ebx);
+    __ Push(edx);
+    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+    __ Pop(edx);
+    __ Pop(ebx);
+    __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+  }
+  __ jmp(&stepping_prepared);
+}
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+                                  Register scratch2) {
+  Register args_count = scratch1;
+  Register return_pc = scratch2;
+
+  // Get the arguments + reciever count.
+  __ mov(args_count,
+         Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ mov(args_count,
+         FieldOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ leave();
+
+  // Drop receiver + arguments.
+  __ pop(return_pc);
+  __ add(esp, args_count);
+  __ push(return_pc);
 }
 
 // Generate code for entering a JS function with the interpreter.
@@ -623,18 +655,7 @@
   masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
 
   // The return value is in eax.
-
-  // Get the arguments + reciever count.
-  __ mov(ebx, Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ mov(ebx, FieldOperand(ebx, BytecodeArray::kParameterSizeOffset));
-
-  // Leave the frame (also dropping the register file).
-  __ leave();
-
-  // Drop receiver + arguments and return.
-  __ pop(ecx);
-  __ add(esp, ebx);
-  __ push(ecx);
+  LeaveInterpreterFrame(masm, ebx, ecx);
   __ ret(0);
 
   // Load debug copy of the bytecode array.
@@ -661,6 +682,31 @@
   __ jmp(ecx);
 }
 
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+  // Save the function and context for call to CompileBaseline.
+  __ mov(edi, Operand(ebp, StandardFrameConstants::kFunctionOffset));
+  __ mov(kContextRegister,
+         Operand(ebp, StandardFrameConstants::kContextOffset));
+
+  // Leave the frame before recompiling for baseline so that we don't count as
+  // an activation on the stack.
+  LeaveInterpreterFrame(masm, ebx, ecx);
+
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Push return value.
+    __ push(eax);
+
+    // Push function as argument and compile for baseline.
+    __ push(edi);
+    __ CallRuntime(Runtime::kCompileBaseline);
+
+    // Restore return value.
+    __ pop(eax);
+  }
+  __ ret(0);
+}
+
 static void Generate_InterpreterPushArgs(MacroAssembler* masm,
                                          Register array_limit) {
   // ----------- S t a t e -------------
@@ -840,13 +886,30 @@
   const int bailout_id = BailoutId::None().ToInt();
   __ cmp(temp, Immediate(Smi::FromInt(bailout_id)));
   __ j(not_equal, &loop_bottom);
+
   // Literals available?
+  Label got_literals, maybe_cleared_weakcell;
   __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
                             SharedFunctionInfo::kOffsetToPreviousLiterals));
+
+  // temp contains either a WeakCell pointing to the literals array or the
+  // literals array directly.
+  STATIC_ASSERT(WeakCell::kValueOffset == FixedArray::kLengthOffset);
+  __ JumpIfSmi(FieldOperand(temp, WeakCell::kValueOffset),
+               &maybe_cleared_weakcell);
+  // The WeakCell value is a pointer, therefore it's a valid literals array.
   __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
-  __ JumpIfSmi(temp, &gotta_call_runtime);
+  __ jmp(&got_literals);
+
+  // We have a smi. If it's 0, then we are looking at a cleared WeakCell
+  // around the literals array, and we should visit the runtime. If it's > 0,
+  // then temp already contains the literals array.
+  __ bind(&maybe_cleared_weakcell);
+  __ cmp(FieldOperand(temp, WeakCell::kValueOffset), Immediate(0));
+  __ j(equal, &gotta_call_runtime);
 
   // Save the literals in the closure.
+  __ bind(&got_literals);
   __ mov(ecx, Operand(esp, 0));
   __ mov(FieldOperand(ecx, JSFunction::kLiteralsOffset), temp);
   __ push(index);
@@ -1119,6 +1182,9 @@
 void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
                                                int field_index) {
   // ----------- S t a t e -------------
+  //  -- eax    : number of arguments
+  //  -- edi    : function
+  //  -- esi    : context
   //  -- esp[0] : return address
   //  -- esp[4] : receiver
   // -----------------------------------
@@ -1161,7 +1227,11 @@
   __ bind(&receiver_not_date);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    __ EnterFrame(StackFrame::INTERNAL);
+    __ Push(ebp);
+    __ Move(ebp, esp);
+    __ Push(esi);
+    __ Push(edi);
+    __ Push(Immediate(0));
     __ CallRuntime(Runtime::kThrowNotDateError);
   }
 }
@@ -1495,6 +1565,8 @@
 void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
   // ----------- S t a t e -------------
   //  -- eax                 : number of arguments
+  //  -- edi                 : function
+  //  -- esi                 : context
   //  -- esp[0]              : return address
   //  -- esp[(argc - n) * 8] : arg[n] (zero-based)
   //  -- esp[(argc + 1) * 8] : receiver
@@ -1522,27 +1594,32 @@
     __ mov(ebx, Operand(esp, ecx, times_pointer_size, 0));
 
     // Load the double value of the parameter into xmm1, maybe converting the
-    // parameter to a number first using the ToNumberStub if necessary.
+    // parameter to a number first using the ToNumber builtin if necessary.
     Label convert, convert_smi, convert_number, done_convert;
     __ bind(&convert);
     __ JumpIfSmi(ebx, &convert_smi);
     __ JumpIfRoot(FieldOperand(ebx, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex, &convert_number);
     {
-      // Parameter is not a Number, use the ToNumberStub to convert it.
-      FrameScope scope(masm, StackFrame::INTERNAL);
+      // Parameter is not a Number, use the ToNumber builtin to convert it.
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ Push(ebp);
+      __ Move(ebp, esp);
+      __ Push(esi);
+      __ Push(edi);
       __ SmiTag(eax);
       __ SmiTag(ecx);
       __ Push(eax);
       __ Push(ecx);
       __ Push(edx);
       __ mov(eax, ebx);
-      ToNumberStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
       __ mov(ebx, eax);
       __ Pop(edx);
       __ Pop(ecx);
       __ Pop(eax);
+      __ Pop(edi);
+      __ Pop(esi);
       {
         // Restore the double accumulator value (xmm0).
         Label restore_smi, done_restore;
@@ -1557,6 +1634,7 @@
       }
       __ SmiUntag(ecx);
       __ SmiUntag(eax);
+      __ leave();
     }
     __ jmp(&convert);
     __ bind(&convert_number);
@@ -1590,8 +1668,10 @@
 
     // Left and right hand side are equal, check for -0 vs. +0.
     __ bind(&compare_equal);
+    __ Push(edi);  // Preserve function in edi.
     __ movmskpd(edi, reg);
     __ test(edi, Immediate(1));
+    __ Pop(edi);
     __ j(not_zero, &compare_swap);
 
     __ bind(&done_compare);
@@ -1631,8 +1711,7 @@
   }
 
   // 2a. Convert the first argument to a number.
-  ToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
+  __ Jump(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
 
   // 2b. No arguments, return +0 (already in eax).
   __ bind(&no_arguments);
@@ -1682,8 +1761,7 @@
       __ Push(edi);
       __ Push(edx);
       __ Move(eax, ebx);
-      ToNumberStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
       __ Move(ebx, eax);
       __ Pop(edx);
       __ Pop(edi);
@@ -2575,6 +2653,81 @@
   __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
 }
 
+// static
+void Builtins::Generate_StringToNumber(MacroAssembler* masm) {
+  // The StringToNumber stub takes one argument in eax.
+  __ AssertString(eax);
+
+  // Check if string has a cached array index.
+  Label runtime;
+  __ test(FieldOperand(eax, String::kHashFieldOffset),
+          Immediate(String::kContainsCachedArrayIndexMask));
+  __ j(not_zero, &runtime, Label::kNear);
+  __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
+  __ IndexFromHash(eax, eax);
+  __ Ret();
+
+  __ bind(&runtime);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    // Push argument.
+    __ push(eax);
+    // We cannot use a tail call here because this builtin can also be called
+    // from wasm.
+    __ CallRuntime(Runtime::kStringToNumber);
+  }
+  __ Ret();
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in eax.
+  Label not_smi;
+  __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
+  __ Ret();
+  __ bind(&not_smi);
+
+  Label not_heap_number;
+  __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
+  __ j(not_equal, &not_heap_number, Label::kNear);
+  __ Ret();
+  __ bind(&not_heap_number);
+
+  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+          RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_NonNumberToNumber(MacroAssembler* masm) {
+  // The NonNumberToNumber stub takes one argument in eax.
+  __ AssertNotNumber(eax);
+
+  Label not_string;
+  __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi);
+  // eax: object
+  // edi: object map
+  __ j(above_equal, &not_string, Label::kNear);
+  __ Jump(masm->isolate()->builtins()->StringToNumber(),
+          RelocInfo::CODE_TARGET);
+  __ bind(&not_string);
+
+  Label not_oddball;
+  __ CmpInstanceType(edi, ODDBALL_TYPE);
+  __ j(not_equal, &not_oddball, Label::kNear);
+  __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
+  __ Ret();
+  __ bind(&not_oddball);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    // Push argument.
+    __ push(eax);
+    // We cannot use a tail call here because this builtin can also be called
+    // from wasm.
+    __ CallRuntime(Runtime::kToNumber);
+  }
+  __ Ret();
+}
+
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax : actual number of arguments
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index b711ce9..5761b16 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -22,78 +22,29 @@
 namespace v8 {
 namespace internal {
 
+#define __ ACCESS_MASM(masm)
 
-static void InitializeArrayConstructorDescriptor(
-    Isolate* isolate, CodeStubDescriptor* descriptor,
-    int constant_stack_parameter_count) {
-  // register state
-  // eax -- number of arguments
-  // edi -- function
-  // ebx -- allocation site with elements kind
-  Address deopt_handler = Runtime::FunctionForId(
-      Runtime::kArrayConstructor)->entry;
-
-  if (constant_stack_parameter_count == 0) {
-    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  } else {
-    descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  }
+void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
+  __ pop(ecx);
+  __ mov(MemOperand(esp, eax, times_4, 0), edi);
+  __ push(edi);
+  __ push(ebx);
+  __ push(ecx);
+  __ add(eax, Immediate(3));
+  __ TailCallRuntime(Runtime::kNewArray);
 }
 
-
-static void InitializeInternalArrayConstructorDescriptor(
-    Isolate* isolate, CodeStubDescriptor* descriptor,
-    int constant_stack_parameter_count) {
-  // register state
-  // eax -- number of arguments
-  // edi -- constructor function
-  Address deopt_handler = Runtime::FunctionForId(
-      Runtime::kInternalArrayConstructor)->entry;
-
-  if (constant_stack_parameter_count == 0) {
-    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  } else {
-    descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  }
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
   descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
 }
 
-void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+void FastFunctionBindStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
+  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
+  descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
 }
 
-
-void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-
 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                                ExternalReference miss) {
   // Update the static counter each time a new code stub is generated.
@@ -680,7 +631,6 @@
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
                                           &miss,  // When index out of range.
-                                          STRING_INDEX_IS_ARRAY_INDEX,
                                           RECEIVER_IS_STRING);
   char_at_generator.GenerateFast(masm);
   __ ret(0);
@@ -1469,6 +1419,7 @@
   // edi : the function to call
   Isolate* isolate = masm->isolate();
   Label initialize, done, miss, megamorphic, not_array_function;
+  Label done_increment_count, done_initialize_count;
 
   // Load the cache state into ecx.
   __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
@@ -1481,7 +1432,7 @@
   // type-feedback-vector.h).
   Label check_allocation_site;
   __ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
-  __ j(equal, &done, Label::kFar);
+  __ j(equal, &done_increment_count, Label::kFar);
   __ CompareRoot(ecx, Heap::kmegamorphic_symbolRootIndex);
   __ j(equal, &done, Label::kFar);
   __ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
@@ -1504,7 +1455,7 @@
   __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
   __ cmp(edi, ecx);
   __ j(not_equal, &megamorphic);
-  __ jmp(&done, Label::kFar);
+  __ jmp(&done_increment_count, Label::kFar);
 
   __ bind(&miss);
 
@@ -1533,11 +1484,25 @@
   // slot.
   CreateAllocationSiteStub create_stub(isolate);
   CallStubInRecordCallTarget(masm, &create_stub);
-  __ jmp(&done);
+  __ jmp(&done_initialize_count);
 
   __ bind(&not_array_function);
   CreateWeakCellStub weak_cell_stub(isolate);
   CallStubInRecordCallTarget(masm, &weak_cell_stub);
+  __ bind(&done_initialize_count);
+
+  // Initialize the call counter.
+  __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+                      FixedArray::kHeaderSize + kPointerSize),
+         Immediate(Smi::FromInt(1)));
+  __ jmp(&done);
+
+  __ bind(&done_increment_count);
+  // Increment the call count for monomorphic function calls.
+  __ add(FieldOperand(ebx, edx, times_half_pointer_size,
+                      FixedArray::kHeaderSize + kPointerSize),
+         Immediate(Smi::FromInt(1)));
+
   __ bind(&done);
 }
 
@@ -1601,7 +1566,7 @@
   // Increment the call count for monomorphic function calls.
   __ add(FieldOperand(ebx, edx, times_half_pointer_size,
                       FixedArray::kHeaderSize + kPointerSize),
-         Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+         Immediate(Smi::FromInt(1)));
 
   __ mov(ebx, ecx);
   __ mov(edx, edi);
@@ -1649,7 +1614,7 @@
   // Increment the call count for monomorphic function calls.
   __ add(FieldOperand(ebx, edx, times_half_pointer_size,
                       FixedArray::kHeaderSize + kPointerSize),
-         Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+         Immediate(Smi::FromInt(1)));
 
   __ bind(&call_function);
   __ Set(eax, argc);
@@ -1720,7 +1685,7 @@
   // Initialize the call counter.
   __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
                       FixedArray::kHeaderSize + kPointerSize),
-         Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+         Immediate(Smi::FromInt(1)));
 
   // Store the function. Use a stub since we need a frame for allocation.
   // ebx - vector
@@ -1774,7 +1739,7 @@
   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
   // It is important that the store buffer overflow stubs are generated first.
-  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
   CreateWeakCellStub::GenerateAheadOfTime(isolate);
   BinaryOpICStub::GenerateAheadOfTime(isolate);
@@ -2102,13 +2067,7 @@
   }
   __ push(object_);
   __ push(index_);  // Consumed by runtime conversion function.
-  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
-    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
-  } else {
-    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
-    // NumberToSmi discards numbers that are not exact integers.
-    __ CallRuntime(Runtime::kNumberToSmi);
-  }
+  __ CallRuntime(Runtime::kNumberToSmi);
   if (!index_.is(eax)) {
     // Save the conversion result before the pop instructions below
     // have a chance to overwrite it.
@@ -2441,77 +2400,12 @@
   // ecx: sub string length (smi)
   // edx: from index (smi)
   StringCharAtGenerator generator(eax, edx, ecx, eax, &runtime, &runtime,
-                                  &runtime, STRING_INDEX_IS_NUMBER,
-                                  RECEIVER_IS_STRING);
+                                  &runtime, RECEIVER_IS_STRING);
   generator.GenerateFast(masm);
   __ ret(3 * kPointerSize);
   generator.SkipSlow(masm, &runtime);
 }
 
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in eax.
-  Label not_smi;
-  __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
-  __ Ret();
-  __ bind(&not_smi);
-
-  Label not_heap_number;
-  __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
-  __ j(not_equal, &not_heap_number, Label::kNear);
-  __ Ret();
-  __ bind(&not_heap_number);
-
-  NonNumberToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-}
-
-void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
-  // The NonNumberToNumber stub takes one argument in eax.
-  __ AssertNotNumber(eax);
-
-  Label not_string;
-  __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi);
-  // eax: object
-  // edi: object map
-  __ j(above_equal, &not_string, Label::kNear);
-  StringToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_string);
-
-  Label not_oddball;
-  __ CmpInstanceType(edi, ODDBALL_TYPE);
-  __ j(not_equal, &not_oddball, Label::kNear);
-  __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
-  __ Ret();
-  __ bind(&not_oddball);
-
-  __ pop(ecx);   // Pop return address.
-  __ push(eax);  // Push argument.
-  __ push(ecx);  // Push return address.
-  __ TailCallRuntime(Runtime::kToNumber);
-}
-
-void StringToNumberStub::Generate(MacroAssembler* masm) {
-  // The StringToNumber stub takes one argument in eax.
-  __ AssertString(eax);
-
-  // Check if string has a cached array index.
-  Label runtime;
-  __ test(FieldOperand(eax, String::kHashFieldOffset),
-          Immediate(String::kContainsCachedArrayIndexMask));
-  __ j(not_zero, &runtime, Label::kNear);
-  __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
-  __ IndexFromHash(eax, eax);
-  __ Ret();
-
-  __ bind(&runtime);
-  __ PopReturnAddressTo(ecx);     // Pop return address.
-  __ Push(eax);                   // Push argument.
-  __ PushReturnAddressFrom(ecx);  // Push return address.
-  __ TailCallRuntime(Runtime::kStringToNumber);
-}
-
 void ToStringStub::Generate(MacroAssembler* masm) {
   // The ToString stub takes one argument in eax.
   Label is_number;
@@ -2718,7 +2612,7 @@
   // Load ecx with the allocation site.  We stick an undefined dummy value here
   // and replace it with the real allocation site later when we instantiate this
   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
-  __ mov(ecx, handle(isolate()->heap()->undefined_value()));
+  __ mov(ecx, isolate()->factory()->undefined_value());
 
   // Make sure that we actually patched the allocation site.
   if (FLAG_debug_code) {
@@ -3549,14 +3443,14 @@
 
 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  LoadICStub stub(isolate(), state());
+  LoadICStub stub(isolate());
   stub.GenerateForTrampoline(masm);
 }
 
 
 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  KeyedLoadICStub stub(isolate(), state());
+  KeyedLoadICStub stub(isolate());
   stub.GenerateForTrampoline(masm);
 }
 
@@ -4332,19 +4226,14 @@
   }
 }
 
-
-void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
       isolate);
   ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
       isolate);
-  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
-      isolate);
-}
+  ArrayNArgumentsConstructorStub stub(isolate);
+  stub.GetCode();
 
-
-void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
-    Isolate* isolate) {
   ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
   for (int i = 0; i < 2; i++) {
     // For internal arrays we only need a few things
@@ -4352,8 +4241,6 @@
     stubh1.GetCode();
     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
     stubh2.GetCode();
-    InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
-    stubh3.GetCode();
   }
 }
 
@@ -4373,13 +4260,15 @@
     CreateArrayDispatchOneArgument(masm, mode);
 
     __ bind(&not_one_case);
-    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+    ArrayNArgumentsConstructorStub stub(masm->isolate());
+    __ TailCallStub(&stub);
   } else if (argument_count() == NONE) {
     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   } else if (argument_count() == ONE) {
     CreateArrayDispatchOneArgument(masm, mode);
   } else if (argument_count() == MORE_THAN_ONE) {
-    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+    ArrayNArgumentsConstructorStub stub(masm->isolate());
+    __ TailCallStub(&stub);
   } else {
     UNREACHABLE();
   }
@@ -4491,7 +4380,7 @@
   __ TailCallStub(&stub1);
 
   __ bind(&not_one_case);
-  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+  ArrayNArgumentsConstructorStub stubN(isolate());
   __ TailCallStub(&stubN);
 }
 
@@ -4804,8 +4693,11 @@
     __ mov(eax, edi);
     __ Ret();
 
-    // Fall back to %AllocateInNewSpace.
+    // Fall back to %AllocateInNewSpace (if not too big).
+    Label too_big_for_new_space;
     __ bind(&allocate);
+    __ cmp(ecx, Immediate(Page::kMaxRegularHeapObjectSize));
+    __ j(greater, &too_big_for_new_space);
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
       __ SmiTag(ecx);
@@ -4818,6 +4710,22 @@
       __ Pop(eax);
     }
     __ jmp(&done_allocate);
+
+    // Fall back to %NewRestParameter.
+    __ bind(&too_big_for_new_space);
+    __ PopReturnAddressTo(ecx);
+    // We reload the function from the caller frame due to register pressure
+    // within this stub. This is the slow path, hence reloading is preferable.
+    if (skip_stub_frame()) {
+      // For Ignition we need to skip the handler/stub frame to reach the
+      // JavaScript frame for the function.
+      __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+      __ Push(Operand(edx, StandardFrameConstants::kFunctionOffset));
+    } else {
+      __ Push(Operand(ebp, StandardFrameConstants::kFunctionOffset));
+    }
+    __ PushReturnAddressFrom(ecx);
+    __ TailCallRuntime(Runtime::kNewRestParameter);
   }
 }
 
@@ -5174,8 +5082,11 @@
   __ mov(eax, edi);
   __ Ret();
 
-  // Fall back to %AllocateInNewSpace.
+  // Fall back to %AllocateInNewSpace (if not too big).
+  Label too_big_for_new_space;
   __ bind(&allocate);
+  __ cmp(ecx, Immediate(Page::kMaxRegularHeapObjectSize));
+  __ j(greater, &too_big_for_new_space);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ SmiTag(ecx);
@@ -5188,37 +5099,22 @@
     __ Pop(eax);
   }
   __ jmp(&done_allocate);
-}
 
-
-void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
-  Register context_reg = esi;
-  Register slot_reg = ebx;
-  Register result_reg = eax;
-  Label slow_case;
-
-  // Go up context chain to the script context.
-  for (int i = 0; i < depth(); ++i) {
-    __ mov(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
-    context_reg = result_reg;
+  // Fall back to %NewStrictArguments.
+  __ bind(&too_big_for_new_space);
+  __ PopReturnAddressTo(ecx);
+  // We reload the function from the caller frame due to register pressure
+  // within this stub. This is the slow path, hence reloading is preferable.
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
+    __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+    __ Push(Operand(edx, StandardFrameConstants::kFunctionOffset));
+  } else {
+    __ Push(Operand(ebp, StandardFrameConstants::kFunctionOffset));
   }
-
-  // Load the PropertyCell value at the specified slot.
-  __ mov(result_reg, ContextOperand(context_reg, slot_reg));
-  __ mov(result_reg, FieldOperand(result_reg, PropertyCell::kValueOffset));
-
-  // Check that value is not the_hole.
-  __ CompareRoot(result_reg, Heap::kTheHoleValueRootIndex);
-  __ j(equal, &slow_case, Label::kNear);
-  __ Ret();
-
-  // Fallback to the runtime.
-  __ bind(&slow_case);
-  __ SmiTag(slot_reg);
-  __ Pop(result_reg);  // Pop return address.
-  __ Push(slot_reg);
-  __ Push(result_reg);  // Push return address.
-  __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+  __ PushReturnAddressFrom(ecx);
+  __ TailCallRuntime(Runtime::kNewStrictArguments);
 }
 
 
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index fc813f5..c1878f0 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -301,8 +301,8 @@
                                   Register r2,
                                   Register r3) {
       for (int i = 0; i < Register::kNumRegisters; i++) {
-        Register candidate = Register::from_code(i);
-        if (candidate.IsAllocatable()) {
+        if (RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(i)) {
+          Register candidate = Register::from_code(i);
           if (candidate.is(ecx)) continue;
           if (candidate.is(r1)) continue;
           if (candidate.is(r2)) continue;
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 36c83cc..18e5364 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -34,43 +34,6 @@
 #define __ masm.
 
 
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
-  size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
-  if (buffer == nullptr) return nullptr;
-  ExternalReference::InitializeMathExpData();
-
-  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
-                      CodeObjectRequired::kNo);
-  // esp[1 * kPointerSize]: raw double input
-  // esp[0 * kPointerSize]: return address
-  {
-    XMMRegister input = xmm1;
-    XMMRegister result = xmm2;
-    __ movsd(input, Operand(esp, 1 * kPointerSize));
-    __ push(eax);
-    __ push(ebx);
-
-    MathExpGenerator::EmitMathExp(&masm, input, result, xmm0, eax, ebx);
-
-    __ pop(ebx);
-    __ pop(eax);
-    __ movsd(Operand(esp, 1 * kPointerSize), result);
-    __ fld_d(Operand(esp, 1 * kPointerSize));
-    __ Ret();
-  }
-
-  CodeDesc desc;
-  masm.GetCode(&desc);
-  DCHECK(!RelocInfo::RequiresRelocation(desc));
-
-  Assembler::FlushICache(isolate, buffer, actual_size);
-  base::OS::ProtectCode(buffer, actual_size);
-  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
-}
-
-
 UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
   size_t actual_size;
   // Allocate buffer in executable space.
@@ -580,6 +543,7 @@
 
   __ push(eax);
   __ push(ebx);
+  __ push(esi);
 
   __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));
 
@@ -620,8 +584,9 @@
 
   // Call into runtime if GC is required.
   __ bind(&gc_required);
+
   // Restore registers before jumping into runtime.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ pop(esi);
   __ pop(ebx);
   __ pop(eax);
   __ jmp(fail);
@@ -656,12 +621,11 @@
   __ sub(edi, Immediate(Smi::FromInt(1)));
   __ j(not_sign, &loop);
 
+  // Restore registers.
+  __ pop(esi);
   __ pop(ebx);
   __ pop(eax);
 
-  // Restore esi.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
   __ bind(&only_change_map);
   // eax: value
   // ebx: target map
@@ -927,64 +891,6 @@
   __ bind(&done);
 }
 
-
-static Operand ExpConstant(int index) {
-  return Operand::StaticVariable(ExternalReference::math_exp_constants(index));
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
-                                   XMMRegister input,
-                                   XMMRegister result,
-                                   XMMRegister double_scratch,
-                                   Register temp1,
-                                   Register temp2) {
-  DCHECK(!input.is(double_scratch));
-  DCHECK(!input.is(result));
-  DCHECK(!result.is(double_scratch));
-  DCHECK(!temp1.is(temp2));
-  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
-  DCHECK(!masm->serializer_enabled());  // External references not serializable.
-
-  Label done;
-
-  __ movsd(double_scratch, ExpConstant(0));
-  __ xorpd(result, result);
-  __ ucomisd(double_scratch, input);
-  __ j(above_equal, &done);
-  __ ucomisd(input, ExpConstant(1));
-  __ movsd(result, ExpConstant(2));
-  __ j(above_equal, &done);
-  __ movsd(double_scratch, ExpConstant(3));
-  __ movsd(result, ExpConstant(4));
-  __ mulsd(double_scratch, input);
-  __ addsd(double_scratch, result);
-  __ movd(temp2, double_scratch);
-  __ subsd(double_scratch, result);
-  __ movsd(result, ExpConstant(6));
-  __ mulsd(double_scratch, ExpConstant(5));
-  __ subsd(double_scratch, input);
-  __ subsd(result, double_scratch);
-  __ movsd(input, double_scratch);
-  __ mulsd(input, double_scratch);
-  __ mulsd(result, input);
-  __ mov(temp1, temp2);
-  __ mulsd(result, ExpConstant(7));
-  __ subsd(result, double_scratch);
-  __ add(temp1, Immediate(0x1ff800));
-  __ addsd(result, ExpConstant(8));
-  __ and_(temp2, Immediate(0x7ff));
-  __ shr(temp1, 11);
-  __ shl(temp1, 20);
-  __ movd(input, temp1);
-  __ pshufd(input, input, static_cast<uint8_t>(0xe1));  // Order: 11 10 00 01
-  __ movsd(double_scratch, Operand::StaticArray(
-      temp2, times_8, ExternalReference::math_exp_log_table()));
-  __ orps(input, double_scratch);
-  __ mulsd(result, input);
-  __ bind(&done);
-}
-
 #undef __
 
 
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 133b1ad..46468e9 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -29,19 +29,6 @@
 };
 
 
-class MathExpGenerator : public AllStatic {
- public:
-  static void EmitMathExp(MacroAssembler* masm,
-                          XMMRegister input,
-                          XMMRegister result,
-                          XMMRegister double_scratch,
-                          Register temp1,
-                          Register temp2);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 656d3e9..c14a2a1 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -196,8 +196,7 @@
 
   const int kDoubleRegsSize = kDoubleSize * XMMRegister::kMaxNumRegisters;
   __ sub(esp, Immediate(kDoubleRegsSize));
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
     int code = config->GetAllocatableDoubleCode(i);
     XMMRegister xmm_reg = XMMRegister::from_code(code);
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index 8a1b3b5..be3530c 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -1232,6 +1232,10 @@
       return "shrd";  // 3-operand version.
     case 0xAB:
       return "bts";
+    case 0xB0:
+      return "cmpxchg_b";
+    case 0xB1:
+      return "cmpxchg";
     case 0xBC:
       return "bsf";
     case 0xBD:
@@ -1264,6 +1268,9 @@
     vex_byte0_ = *data;
     vex_byte1_ = *(data + 1);
     data += 2;
+  } else if (*data == 0xF0 /*lock*/) {
+    AppendToBuffer("lock ");
+    data++;
   }
 
   bool processed = true;  // Will be set to false if the current instruction
@@ -1496,6 +1503,18 @@
             } else {
               AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
             }
+          } else if (f0byte == 0xB0) {
+            // cmpxchg_b
+            data += 2;
+            AppendToBuffer("%s ", f0mnem);
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            data += PrintRightOperand(data);
+            AppendToBuffer(",%s", NameOfByteCPURegister(regop));
+          } else if (f0byte == 0xB1) {
+            // cmpxchg
+            data += 2;
+            data += PrintOperands(f0mnem, OPER_REG_OP_ORDER, data);
           } else if (f0byte == 0xBC) {
             data += 2;
             int mod, regop, rm;
@@ -1620,9 +1639,8 @@
           data++;
           int mod, regop, rm;
           get_modrm(*data, &mod, &regop, &rm);
-          AppendToBuffer("xchg_w ");
+          AppendToBuffer("xchg_w %s,", NameOfCPURegister(regop));
           data += PrintRightOperand(data);
-          AppendToBuffer(",%s", NameOfCPURegister(regop));
         } else if (*data == 0x89) {
           data++;
           int mod, regop, rm;
@@ -1897,6 +1915,9 @@
                            NameOfXMMRegister(regop),
                            NameOfXMMRegister(rm));
             data++;
+          } else if (*data == 0xB1) {
+            data++;
+            data += PrintOperands("cmpxchg_w", OPER_REG_OP_ORDER, data);
           } else {
             UnimplementedInstruction();
           }
@@ -2227,7 +2248,7 @@
 
 
 const char* NameConverter::NameOfAddress(byte* addr) const {
-  v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+  v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
   return tmp_buffer_.start();
 }
 
@@ -2290,7 +2311,7 @@
     buffer[0] = '\0';
     byte* prev_pc = pc;
     pc += d.InstructionDecode(buffer, pc);
-    fprintf(f, "%p", prev_pc);
+    fprintf(f, "%p", static_cast<void*>(prev_pc));
     fprintf(f, "    ");
 
     for (byte* bp = prev_pc; bp < pc; bp++) {
diff --git a/src/ia32/interface-descriptors-ia32.cc b/src/ia32/interface-descriptors-ia32.cc
index 8a877b1..98259c7 100644
--- a/src/ia32/interface-descriptors-ia32.cc
+++ b/src/ia32/interface-descriptors-ia32.cc
@@ -11,6 +11,14 @@
 
 const Register CallInterfaceDescriptor::ContextRegister() { return esi; }
 
+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
+    CallInterfaceDescriptorData* data, int register_parameter_count) {
+  const Register default_stub_registers[] = {eax, ebx, ecx, edx, edi};
+  CHECK_LE(static_cast<size_t>(register_parameter_count),
+           arraysize(default_stub_registers));
+  data->InitializePlatformSpecific(register_parameter_count,
+                                   default_stub_registers);
+}
 
 const Register LoadDescriptor::ReceiverRegister() { return edx; }
 const Register LoadDescriptor::NameRegister() { return ecx; }
@@ -44,9 +52,6 @@
 const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
 
 
-const Register LoadGlobalViaContextDescriptor::SlotRegister() { return ebx; }
-
-
 const Register StoreGlobalViaContextDescriptor::SlotRegister() { return ebx; }
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
 
@@ -68,8 +73,6 @@
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
 
-const Register HasPropertyDescriptor::ObjectRegister() { return eax; }
-const Register HasPropertyDescriptor::KeyRegister() { return ebx; }
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -259,43 +262,27 @@
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
   // eax -- number of arguments
   // edi -- function
   // ebx -- allocation site with elements kind
-  Register registers[] = {edi, ebx};
+  Register registers[] = {edi, ebx, eax};
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  // stack param count needs (constructor pointer, and single argument)
-  Register registers[] = {edi, ebx, eax};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void InternalArrayConstructorConstantArgCountDescriptor::
-    InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
   // register state
   // eax -- number of arguments
   // edi -- function
-  Register registers[] = {edi};
+  // ebx -- allocation site with elements kind
+  Register registers[] = {edi, ebx, eax};
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
-void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // stack param count needs (constructor pointer, and single argument)
-  Register registers[] = {edi, eax};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastArrayPushDescriptor::InitializePlatformSpecific(
+void VarArgFunctionDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // stack param count needs (arg count)
   Register registers[] = {eax};
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 08189e2..25a0a95 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1095,8 +1095,8 @@
 
 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
   mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  mov(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
-  mov(vector, FieldOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+  mov(vector, FieldOperand(vector, JSFunction::kLiteralsOffset));
+  mov(vector, FieldOperand(vector, LiteralsArray::kFeedbackVectorOffset));
 }
 
 
@@ -2362,10 +2362,11 @@
                                              const ParameterCount& expected,
                                              const ParameterCount& actual) {
   Label skip_flooding;
-  ExternalReference step_in_enabled =
-      ExternalReference::debug_step_in_enabled_address(isolate());
-  cmpb(Operand::StaticVariable(step_in_enabled), Immediate(0));
-  j(equal, &skip_flooding);
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
+  j(less, &skip_flooding);
   {
     FrameScope frame(this,
                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -2678,7 +2679,7 @@
 
 
 void MacroAssembler::Move(Register dst, const Immediate& x) {
-  if (x.is_zero()) {
+  if (x.is_zero() && RelocInfo::IsNone(x.rmode_)) {
     xor_(dst, dst);  // Shorter than mov of 32-bit immediate 0.
   } else {
     mov(dst, x);
diff --git a/src/ic/access-compiler.h b/src/ic/access-compiler.h
index 50c2cc7..ecc5c08 100644
--- a/src/ic/access-compiler.h
+++ b/src/ic/access-compiler.h
@@ -58,7 +58,6 @@
   Register vector() const;
   Register scratch1() const { return registers_[2]; }
   Register scratch2() const { return registers_[3]; }
-  Register scratch3() const { return registers_[4]; }
 
   static Register* GetCallingConvention(Code::Kind);
   static Register* load_calling_convention();
diff --git a/src/ic/arm/access-compiler-arm.cc b/src/ic/arm/access-compiler-arm.cc
index d360f5a..9ce485e 100644
--- a/src/ic/arm/access-compiler-arm.cc
+++ b/src/ic/arm/access-compiler-arm.cc
@@ -19,19 +19,19 @@
 
 
 Register* PropertyAccessCompiler::load_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3, scratch4.
+  // receiver, name, scratch1, scratch2, scratch3.
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, r3, r0, r4, r5};
+  static Register registers[] = {receiver, name, r3, r0, r4};
   return registers;
 }
 
 
 Register* PropertyAccessCompiler::store_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3.
+  // receiver, name, scratch1, scratch2.
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register name = StoreDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, r3, r4, r5};
+  static Register registers[] = {receiver, name, r3, r4};
   return registers;
 }
 
diff --git a/src/ic/arm/handler-compiler-arm.cc b/src/ic/arm/handler-compiler-arm.cc
index ed26a4e..b98602b 100644
--- a/src/ic/arm/handler-compiler-arm.cc
+++ b/src/ic/arm/handler-compiler-arm.cc
@@ -204,8 +204,9 @@
     MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
     Register scratch, Label* miss) {
   Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
-  DCHECK(cell->value()->IsTheHole());
-  Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
+  Isolate* isolate = masm->isolate();
+  DCHECK(cell->value()->IsTheHole(isolate));
+  Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
   __ LoadWeakValue(scratch, weak_cell, miss);
   __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -290,7 +291,7 @@
   Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
   bool call_data_undefined = false;
   // Put call data in place.
-  if (api_call_info->data()->IsUndefined()) {
+  if (api_call_info->data()->IsUndefined(isolate)) {
     call_data_undefined = true;
     __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
   } else {
@@ -437,28 +438,25 @@
   DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
          !scratch2.is(scratch1));
 
-  if (FLAG_eliminate_prototype_chain_checks) {
-    Handle<Cell> validity_cell =
-        Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
-    if (!validity_cell.is_null()) {
-      DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
-                validity_cell->value());
-      __ mov(scratch1, Operand(validity_cell));
-      __ ldr(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
-      __ cmp(scratch1, Operand(Smi::FromInt(Map::kPrototypeChainValid)));
-      __ b(ne, miss);
-    }
+  Handle<Cell> validity_cell =
+      Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+  if (!validity_cell.is_null()) {
+    DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
+    __ mov(scratch1, Operand(validity_cell));
+    __ ldr(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
+    __ cmp(scratch1, Operand(Smi::FromInt(Map::kPrototypeChainValid)));
+    __ b(ne, miss);
+  }
 
-    // The prototype chain of primitives (and their JSValue wrappers) depends
-    // on the native context, which can't be guarded by validity cells.
-    // |object_reg| holds the native context specific prototype in this case;
-    // we need to check its map.
-    if (check == CHECK_ALL_MAPS) {
-      __ ldr(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
-      Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-      __ CmpWeakValue(scratch1, cell, scratch2);
-      __ b(ne, miss);
-    }
+  // The prototype chain of primitives (and their JSValue wrappers) depends
+  // on the native context, which can't be guarded by validity cells.
+  // |object_reg| holds the native context specific prototype in this case;
+  // we need to check its map.
+  if (check == CHECK_ALL_MAPS) {
+    __ ldr(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+    __ CmpWeakValue(scratch1, cell, scratch2);
+    __ b(ne, miss);
   }
 
   // Keep track of the current object in register reg.
@@ -494,8 +492,10 @@
            !current_map->is_access_check_needed());
 
     prototype = handle(JSObject::cast(current_map->prototype()));
-    if (current_map->is_dictionary_map() &&
-        !current_map->IsJSGlobalObjectMap()) {
+    if (current_map->IsJSGlobalObjectMap()) {
+      GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+                                name, scratch2, miss);
+    } else if (current_map->is_dictionary_map()) {
       DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
       if (!name->IsUniqueName()) {
         DCHECK(name->IsString());
@@ -505,33 +505,12 @@
              current->property_dictionary()->FindEntry(name) ==
                  NameDictionary::kNotFound);
 
-      if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
+      if (depth > 1) {
         // TODO(jkummerow): Cache and re-use weak cell.
         __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
       }
       GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
                                        scratch2);
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-        __ ldr(holder_reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-      }
-    } else {
-      Register map_reg = scratch1;
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
-      }
-      if (current_map->IsJSGlobalObjectMap()) {
-        GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
-                                  name, scratch2, miss);
-      } else if (!FLAG_eliminate_prototype_chain_checks &&
-                 (depth != 1 || check == CHECK_ALL_MAPS)) {
-        Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
-        __ CmpWeakValue(map_reg, cell, scratch2);
-        __ b(ne, miss);
-      }
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ ldr(holder_reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
-      }
     }
 
     reg = holder_reg;  // From now on the object will be in holder_reg.
@@ -545,17 +524,8 @@
   // Log the check depth.
   LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
 
-  if (!FLAG_eliminate_prototype_chain_checks &&
-      (depth != 0 || check == CHECK_ALL_MAPS)) {
-    // Check the holder map.
-    __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-    Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
-    __ CmpWeakValue(scratch1, cell, scratch2);
-    __ b(ne, miss);
-  }
-
   bool return_holder = return_what == RETURN_HOLDER;
-  if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
+  if (return_holder && depth != 0) {
     __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
   }
 
@@ -600,7 +570,7 @@
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
-  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
 
   // Compile the interceptor call, followed by inline code to load the
   // property from further up the prototype chain if the call fails.
@@ -660,7 +630,7 @@
 void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
-  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
   PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
                            holder());
 
@@ -678,7 +648,7 @@
 
   // If the callback cannot leak, then push the callback directly,
   // otherwise wrap it in a weak cell.
-  if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+  if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
     __ mov(ip, Operand(callback));
   } else {
     Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
diff --git a/src/ic/arm/ic-arm.cc b/src/ic/arm/ic-arm.cc
index b0bcc88..f3af785 100644
--- a/src/ic/arm/ic-arm.cc
+++ b/src/ic/arm/ic-arm.cc
@@ -734,26 +734,6 @@
   GenerateMiss(masm);
 }
 
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  DCHECK(receiver.is(r1));
-  DCHECK(name.is(r2));
-  DCHECK(StoreDescriptor::ValueRegister().is(r0));
-
-  // Get the receiver from the stack and probe the stub cache.
-  Code::Flags flags =
-      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
-
-  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, name, r5, r6, r7, r8);
-
-  // Cache miss: Jump to runtime.
-  GenerateMiss(masm);
-}
-
-
 void StoreIC::GenerateMiss(MacroAssembler* masm) {
   StoreIC_PushArgs(masm);
 
@@ -845,8 +825,9 @@
   }
 
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n", address,
-           cmp_instruction_address, delta);
+    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n",
+           static_cast<void*>(address),
+           static_cast<void*>(cmp_instruction_address), delta);
   }
 
   Address patch_address =
diff --git a/src/ic/arm64/access-compiler-arm64.cc b/src/ic/arm64/access-compiler-arm64.cc
index 892ce85..6273633 100644
--- a/src/ic/arm64/access-compiler-arm64.cc
+++ b/src/ic/arm64/access-compiler-arm64.cc
@@ -26,19 +26,19 @@
 // we use the same assignments as ARM to remain on the safe side.
 
 Register* PropertyAccessCompiler::load_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3, scratch4.
+  // receiver, name, scratch1, scratch2, scratch3.
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, x3, x0, x4, x5};
+  static Register registers[] = {receiver, name, x3, x0, x4};
   return registers;
 }
 
 
 Register* PropertyAccessCompiler::store_calling_convention() {
-  // receiver, value, scratch1, scratch2, scratch3.
+  // receiver, value, scratch1, scratch2.
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register name = StoreDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, x3, x4, x5};
+  static Register registers[] = {receiver, name, x3, x4};
   return registers;
 }
 
diff --git a/src/ic/arm64/handler-compiler-arm64.cc b/src/ic/arm64/handler-compiler-arm64.cc
index 84c0397..1126beb 100644
--- a/src/ic/arm64/handler-compiler-arm64.cc
+++ b/src/ic/arm64/handler-compiler-arm64.cc
@@ -110,8 +110,9 @@
     MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
     Register scratch, Label* miss) {
   Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
-  DCHECK(cell->value()->IsTheHole());
-  Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
+  Isolate* isolate = masm->isolate();
+  DCHECK(cell->value()->IsTheHole(isolate));
+  Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
   __ LoadWeakValue(scratch, weak_cell, miss);
   __ Ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
   __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
@@ -197,7 +198,7 @@
   Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
   bool call_data_undefined = false;
   // Put call data in place.
-  if (api_call_info->data()->IsUndefined()) {
+  if (api_call_info->data()->IsUndefined(isolate)) {
     call_data_undefined = true;
     __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
   } else {
@@ -467,28 +468,25 @@
   DCHECK(!AreAliased(object_reg, scratch1, scratch2));
   DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
 
-  if (FLAG_eliminate_prototype_chain_checks) {
-    Handle<Cell> validity_cell =
-        Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
-    if (!validity_cell.is_null()) {
-      DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
-                validity_cell->value());
-      __ Mov(scratch1, Operand(validity_cell));
-      __ Ldr(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
-      __ Cmp(scratch1, Operand(Smi::FromInt(Map::kPrototypeChainValid)));
-      __ B(ne, miss);
-    }
+  Handle<Cell> validity_cell =
+      Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+  if (!validity_cell.is_null()) {
+    DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
+    __ Mov(scratch1, Operand(validity_cell));
+    __ Ldr(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
+    __ Cmp(scratch1, Operand(Smi::FromInt(Map::kPrototypeChainValid)));
+    __ B(ne, miss);
+  }
 
-    // The prototype chain of primitives (and their JSValue wrappers) depends
-    // on the native context, which can't be guarded by validity cells.
-    // |object_reg| holds the native context specific prototype in this case;
-    // we need to check its map.
-    if (check == CHECK_ALL_MAPS) {
-      __ Ldr(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
-      Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-      __ CmpWeakValue(scratch1, cell, scratch2);
-      __ B(ne, miss);
-    }
+  // The prototype chain of primitives (and their JSValue wrappers) depends
+  // on the native context, which can't be guarded by validity cells.
+  // |object_reg| holds the native context specific prototype in this case;
+  // we need to check its map.
+  if (check == CHECK_ALL_MAPS) {
+    __ Ldr(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+    __ CmpWeakValue(scratch1, cell, scratch2);
+    __ B(ne, miss);
   }
 
   // Keep track of the current object in register reg.
@@ -525,8 +523,10 @@
            !current_map->is_access_check_needed());
 
     prototype = handle(JSObject::cast(current_map->prototype()));
-    if (current_map->is_dictionary_map() &&
-        !current_map->IsJSGlobalObjectMap()) {
+    if (current_map->IsJSGlobalObjectMap()) {
+      GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+                                name, scratch2, miss);
+    } else if (current_map->is_dictionary_map()) {
       DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
       if (!name->IsUniqueName()) {
         DCHECK(name->IsString());
@@ -535,34 +535,12 @@
       DCHECK(current.is_null() || (current->property_dictionary()->FindEntry(
                                        name) == NameDictionary::kNotFound));
 
-      if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
+      if (depth > 1) {
         // TODO(jkummerow): Cache and re-use weak cell.
         __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
       }
       GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
                                        scratch2);
-
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-        __ Ldr(holder_reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-      }
-    } else {
-      Register map_reg = scratch1;
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
-      }
-      if (current_map->IsJSGlobalObjectMap()) {
-        GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
-                                  name, scratch2, miss);
-      } else if (!FLAG_eliminate_prototype_chain_checks &&
-                 (depth != 1 || check == CHECK_ALL_MAPS)) {
-        Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
-        __ CmpWeakValue(map_reg, cell, scratch2);
-        __ B(ne, miss);
-      }
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ Ldr(holder_reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
-      }
     }
 
     reg = holder_reg;  // From now on the object will be in holder_reg.
@@ -576,17 +554,8 @@
   // Log the check depth.
   LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
 
-  if (!FLAG_eliminate_prototype_chain_checks &&
-      (depth != 0 || check == CHECK_ALL_MAPS)) {
-    // Check the holder map.
-    __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-    Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
-    __ CmpWeakValue(scratch1, cell, scratch2);
-    __ B(ne, miss);
-  }
-
   bool return_holder = return_what == RETURN_HOLDER;
-  if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
+  if (return_holder && depth != 0) {
     __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
   }
 
@@ -637,7 +606,7 @@
   DCHECK(!AreAliased(receiver(), this->name(), scratch1(), scratch2(),
                      scratch3()));
   DCHECK(holder()->HasNamedInterceptor());
-  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
 
   // Compile the interceptor call, followed by inline code to load the
   // property from further up the prototype chain if the call fails.
@@ -696,7 +665,7 @@
 void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
-  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
   PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
                            holder());
 
@@ -718,7 +687,7 @@
   DCHECK(!AreAliased(holder_reg, scratch1(), scratch2(), value()));
   // If the callback cannot leak, then push the callback directly,
   // otherwise wrap it in a weak cell.
-  if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+  if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
     __ Mov(scratch1(), Operand(callback));
   } else {
     Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
diff --git a/src/ic/arm64/ic-arm64.cc b/src/ic/arm64/ic-arm64.cc
index 3fd384e..69b8c2f 100644
--- a/src/ic/arm64/ic-arm64.cc
+++ b/src/ic/arm64/ic-arm64.cc
@@ -724,24 +724,6 @@
   GenerateMiss(masm);
 }
 
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  DCHECK(!AreAliased(receiver, name, StoreDescriptor::ValueRegister(), x5, x6,
-                     x7, x8));
-
-  // Probe the stub cache.
-  Code::Flags flags =
-      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, name, x5, x6, x7, x8);
-
-  // Cache miss: Jump to runtime.
-  GenerateMiss(masm);
-}
-
-
 void StoreIC::GenerateMiss(MacroAssembler* masm) {
   StoreIC_PushArgs(masm);
 
@@ -821,8 +803,9 @@
   }
 
   if (FLAG_trace_ic) {
-    PrintF("[  Patching ic at %p, marker=%p, SMI check=%p\n", address,
-           info_address, reinterpret_cast<void*>(info.SmiCheck()));
+    PrintF("[  Patching ic at %p, marker=%p, SMI check=%p\n",
+           static_cast<void*>(address), static_cast<void*>(info_address),
+           static_cast<void*>(info.SmiCheck()));
   }
 
   // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
diff --git a/src/ic/call-optimization.cc b/src/ic/call-optimization.cc
index 571b614..f7a1f69 100644
--- a/src/ic/call-optimization.cc
+++ b/src/ic/call-optimization.cc
@@ -89,11 +89,12 @@
 
 void CallOptimization::Initialize(
     Handle<FunctionTemplateInfo> function_template_info) {
-  if (function_template_info->call_code()->IsUndefined()) return;
+  Isolate* isolate = function_template_info->GetIsolate();
+  if (function_template_info->call_code()->IsUndefined(isolate)) return;
   api_call_info_ =
       handle(CallHandlerInfo::cast(function_template_info->call_code()));
 
-  if (!function_template_info->signature()->IsUndefined()) {
+  if (!function_template_info->signature()->IsUndefined(isolate)) {
     expected_receiver_type_ =
         handle(FunctionTemplateInfo::cast(function_template_info->signature()));
   }
@@ -110,15 +111,17 @@
 
 void CallOptimization::AnalyzePossibleApiFunction(Handle<JSFunction> function) {
   if (!function->shared()->IsApiFunction()) return;
-  Handle<FunctionTemplateInfo> info(function->shared()->get_api_func_data());
+  Isolate* isolate = function->GetIsolate();
+  Handle<FunctionTemplateInfo> info(function->shared()->get_api_func_data(),
+                                    isolate);
 
   // Require a C++ callback.
-  if (info->call_code()->IsUndefined()) return;
-  api_call_info_ = handle(CallHandlerInfo::cast(info->call_code()));
+  if (info->call_code()->IsUndefined(isolate)) return;
+  api_call_info_ = handle(CallHandlerInfo::cast(info->call_code()), isolate);
 
-  if (!info->signature()->IsUndefined()) {
+  if (!info->signature()->IsUndefined(isolate)) {
     expected_receiver_type_ =
-        handle(FunctionTemplateInfo::cast(info->signature()));
+        handle(FunctionTemplateInfo::cast(info->signature()), isolate);
   }
 
   is_simple_api_call_ = true;
diff --git a/src/ic/handler-compiler.cc b/src/ic/handler-compiler.cc
index 2c8c092..6d153bb 100644
--- a/src/ic/handler-compiler.cc
+++ b/src/ic/handler-compiler.cc
@@ -9,7 +9,6 @@
 #include "src/ic/ic-inl.h"
 #include "src/ic/ic.h"
 #include "src/isolate-inl.h"
-#include "src/profiler/cpu-profiler.h"
 
 namespace v8 {
 namespace internal {
@@ -28,7 +27,7 @@
 Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
     Handle<Name> name, Handle<Map> receiver_map) {
   Isolate* isolate = name->GetIsolate();
-  if (receiver_map->prototype()->IsNull()) {
+  if (receiver_map->prototype()->IsNull(isolate)) {
     // TODO(jkummerow/verwaest): If there is no prototype and the property
     // is nonexistent, introduce a builtin to handle this (fast properties
     // -> return undefined, dictionary properties -> do negative lookup).
@@ -51,7 +50,7 @@
   Handle<JSObject> last(JSObject::cast(receiver_map->prototype()));
   while (true) {
     if (current_map->is_dictionary_map()) cache_name = name;
-    if (current_map->prototype()->IsNull()) break;
+    if (current_map->prototype()->IsNull(isolate)) break;
     if (name->IsPrivate()) {
       // TODO(verwaest): Use nonexistent_private_symbol.
       cache_name = name;
@@ -79,7 +78,7 @@
                                               Handle<Name> name) {
   Code::Flags flags = Code::ComputeHandlerFlags(kind, cache_holder());
   Handle<Code> code = GetCodeWithFlags(flags, name);
-  PROFILE(isolate(), CodeCreateEvent(Logger::HANDLER_TAG,
+  PROFILE(isolate(), CodeCreateEvent(CodeEventListener::HANDLER_TAG,
                                      AbstractCode::cast(*code), *name));
 #ifdef DEBUG
   code->VerifyEmbeddedObjects();
@@ -226,7 +225,11 @@
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
     Handle<Name> name, Handle<AccessorInfo> callback) {
   Register reg = Frontend(name);
-  GenerateLoadCallback(reg, callback);
+  if (FLAG_runtime_call_stats) {
+    TailCallBuiltin(masm(), Builtins::kLoadIC_Slow);
+  } else {
+    GenerateLoadCallback(reg, callback);
+  }
   return GetCode(kind(), name);
 }
 
@@ -236,8 +239,12 @@
     int accessor_index) {
   DCHECK(call_optimization.is_simple_api_call());
   Register holder = Frontend(name);
-  GenerateApiAccessorCall(masm(), call_optimization, map(), receiver(),
-                          scratch2(), false, no_reg, holder, accessor_index);
+  if (FLAG_runtime_call_stats) {
+    TailCallBuiltin(masm(), Builtins::kLoadIC_Slow);
+  } else {
+    GenerateApiAccessorCall(masm(), call_optimization, map(), receiver(),
+                            scratch2(), false, no_reg, holder, accessor_index);
+  }
   return GetCode(kind(), name);
 }
 
@@ -420,7 +427,6 @@
   }
 }
 
-
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadViaGetter(
     Handle<Name> name, int accessor_index, int expected_arguments) {
   Register holder = Frontend(name);
@@ -445,8 +451,7 @@
     PrototypeIterator::WhereToEnd end =
         name->IsPrivate() ? PrototypeIterator::END_AT_NON_HIDDEN
                           : PrototypeIterator::END_AT_NULL;
-    PrototypeIterator iter(isolate(), holder(),
-                           PrototypeIterator::START_AT_PROTOTYPE, end);
+    PrototypeIterator iter(isolate(), holder(), kStartAtPrototype, end);
     while (!iter.IsAtEnd()) {
       last = PrototypeIterator::GetCurrent<JSObject>(iter);
       iter.Advance();
@@ -563,9 +568,14 @@
     Handle<JSObject> object, Handle<Name> name,
     const CallOptimization& call_optimization, int accessor_index) {
   Register holder = Frontend(name);
-  GenerateApiAccessorCall(masm(), call_optimization, handle(object->map()),
-                          receiver(), scratch2(), true, value(), holder,
-                          accessor_index);
+  if (FLAG_runtime_call_stats) {
+    GenerateRestoreName(name);
+    TailCallBuiltin(masm(), Builtins::kStoreIC_Slow);
+  } else {
+    GenerateApiAccessorCall(masm(), call_optimization, handle(object->map()),
+                            receiver(), scratch2(), true, value(), holder,
+                            accessor_index);
+  }
   return GetCode(kind(), name);
 }
 
@@ -593,7 +603,8 @@
            *receiver_map == isolate()->get_initial_js_array_map(elements_kind));
 
       if (receiver_map->has_indexed_interceptor() &&
-          !receiver_map->GetIndexedInterceptor()->getter()->IsUndefined() &&
+          !receiver_map->GetIndexedInterceptor()->getter()->IsUndefined(
+              isolate()) &&
           !receiver_map->GetIndexedInterceptor()->non_masking()) {
         cached_stub = LoadIndexedInterceptorStub(isolate()).GetCode();
       } else if (IsSloppyArgumentsElements(elements_kind)) {
@@ -604,8 +615,7 @@
                                           convert_hole_to_undefined).GetCode();
       } else {
         DCHECK(elements_kind == DICTIONARY_ELEMENTS);
-        LoadICState state = LoadICState(kNoExtraICState);
-        cached_stub = LoadDictionaryElementStub(isolate(), state).GetCode();
+        cached_stub = LoadDictionaryElementStub(isolate()).GetCode();
       }
     }
 
diff --git a/src/ic/handler-compiler.h b/src/ic/handler-compiler.h
index e34cd68..a571f4f 100644
--- a/src/ic/handler-compiler.h
+++ b/src/ic/handler-compiler.h
@@ -205,8 +205,7 @@
                                                         Register prototype,
                                                         Label* miss);
 
-
-  Register scratch4() { return registers_[5]; }
+  Register scratch3() { return registers_[4]; }
 };
 
 
diff --git a/src/ic/ia32/access-compiler-ia32.cc b/src/ic/ia32/access-compiler-ia32.cc
index 1825202..3219f3d 100644
--- a/src/ic/ia32/access-compiler-ia32.cc
+++ b/src/ic/ia32/access-compiler-ia32.cc
@@ -18,19 +18,19 @@
 
 
 Register* PropertyAccessCompiler::load_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3, scratch4.
+  // receiver, name, scratch1, scratch2, scratch3.
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, ebx, eax, edi, no_reg};
+  static Register registers[] = {receiver, name, ebx, eax, edi};
   return registers;
 }
 
 
 Register* PropertyAccessCompiler::store_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3.
+  // receiver, name, scratch1, scratch2.
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register name = StoreDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, ebx, edi, no_reg};
+  static Register registers[] = {receiver, name, ebx, edi};
   return registers;
 }
 
diff --git a/src/ic/ia32/handler-compiler-ia32.cc b/src/ic/ia32/handler-compiler-ia32.cc
index 37ab66d..2a883c7 100644
--- a/src/ic/ia32/handler-compiler-ia32.cc
+++ b/src/ic/ia32/handler-compiler-ia32.cc
@@ -199,7 +199,7 @@
   Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
   bool call_data_undefined = false;
   // Put call data in place.
-  if (api_call_info->data()->IsUndefined()) {
+  if (api_call_info->data()->IsUndefined(isolate)) {
     call_data_undefined = true;
     __ mov(data, Immediate(isolate->factory()->undefined_value()));
   } else {
@@ -237,12 +237,12 @@
     MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
     Register scratch, Label* miss) {
   Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
-  DCHECK(cell->value()->IsTheHole());
-  Factory* factory = masm->isolate()->factory();
-  Handle<WeakCell> weak_cell = factory->NewWeakCell(cell);
+  Isolate* isolate = masm->isolate();
+  DCHECK(cell->value()->IsTheHole(isolate));
+  Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
   __ LoadWeakValue(scratch, weak_cell, miss);
   __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
-         Immediate(factory->the_hole_value()));
+         Immediate(isolate->factory()->the_hole_value()));
   __ j(not_equal, miss);
 }
 
@@ -439,28 +439,25 @@
   DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
          !scratch2.is(scratch1));
 
-  if (FLAG_eliminate_prototype_chain_checks) {
-    Handle<Cell> validity_cell =
-        Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
-    if (!validity_cell.is_null()) {
-      DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
-                validity_cell->value());
-      // Operand::ForCell(...) points to the cell's payload!
-      __ cmp(Operand::ForCell(validity_cell),
-             Immediate(Smi::FromInt(Map::kPrototypeChainValid)));
-      __ j(not_equal, miss);
-    }
+  Handle<Cell> validity_cell =
+      Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+  if (!validity_cell.is_null()) {
+    DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
+    // Operand::ForCell(...) points to the cell's payload!
+    __ cmp(Operand::ForCell(validity_cell),
+           Immediate(Smi::FromInt(Map::kPrototypeChainValid)));
+    __ j(not_equal, miss);
+  }
 
-    // The prototype chain of primitives (and their JSValue wrappers) depends
-    // on the native context, which can't be guarded by validity cells.
-    // |object_reg| holds the native context specific prototype in this case;
-    // we need to check its map.
-    if (check == CHECK_ALL_MAPS) {
-      __ mov(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
-      Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-      __ CmpWeakValue(scratch1, cell, scratch2);
-      __ j(not_equal, miss);
-    }
+  // The prototype chain of primitives (and their JSValue wrappers) depends
+  // on the native context, which can't be guarded by validity cells.
+  // |object_reg| holds the native context specific prototype in this case;
+  // we need to check its map.
+  if (check == CHECK_ALL_MAPS) {
+    __ mov(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
+    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+    __ CmpWeakValue(scratch1, cell, scratch2);
+    __ j(not_equal, miss);
   }
 
   // Keep track of the current object in register reg.
@@ -496,8 +493,10 @@
            !current_map->is_access_check_needed());
 
     prototype = handle(JSObject::cast(current_map->prototype()));
-    if (current_map->is_dictionary_map() &&
-        !current_map->IsJSGlobalObjectMap()) {
+    if (current_map->IsJSGlobalObjectMap()) {
+      GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+                                name, scratch2, miss);
+    } else if (current_map->is_dictionary_map()) {
       DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
       if (!name->IsUniqueName()) {
         DCHECK(name->IsString());
@@ -507,34 +506,12 @@
              current->property_dictionary()->FindEntry(name) ==
                  NameDictionary::kNotFound);
 
-      if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
+      if (depth > 1) {
         // TODO(jkummerow): Cache and re-use weak cell.
         __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
       }
       GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
                                        scratch2);
-
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-        __ mov(holder_reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-      }
-    } else {
-      Register map_reg = scratch1;
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ mov(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
-      }
-      if (current_map->IsJSGlobalObjectMap()) {
-        GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
-                                  name, scratch2, miss);
-      } else if (!FLAG_eliminate_prototype_chain_checks &&
-                 (depth != 1 || check == CHECK_ALL_MAPS)) {
-        Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
-        __ CmpWeakValue(map_reg, cell, scratch2);
-        __ j(not_equal, miss);
-      }
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ mov(holder_reg, FieldOperand(map_reg, Map::kPrototypeOffset));
-      }
     }
 
     reg = holder_reg;  // From now on the object will be in holder_reg.
@@ -548,17 +525,8 @@
   // Log the check depth.
   LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
 
-  if (!FLAG_eliminate_prototype_chain_checks &&
-      (depth != 0 || check == CHECK_ALL_MAPS)) {
-    // Check the holder map.
-    __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-    Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
-    __ CmpWeakValue(scratch1, cell, scratch2);
-    __ j(not_equal, miss);
-  }
-
   bool return_holder = return_what == RETURN_HOLDER;
-  if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
+  if (return_holder && depth != 0) {
     __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
   }
 
@@ -604,7 +572,7 @@
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
-  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
 
   // Compile the interceptor call, followed by inline code to load the
   // property from further up the prototype chain if the call fails.
@@ -671,7 +639,7 @@
 
 void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
-  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
   // Call the runtime system to load the interceptor.
   __ pop(scratch2());  // save old return address
   PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
@@ -692,7 +660,7 @@
   __ push(holder_reg);
   // If the callback cannot leak, then push the callback directly,
   // otherwise wrap it in a weak cell.
-  if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+  if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
     __ Push(callback);
   } else {
     Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
diff --git a/src/ic/ia32/ic-ia32.cc b/src/ic/ia32/ic-ia32.cc
index d32e1c3..4fa990d 100644
--- a/src/ic/ia32/ic-ia32.cc
+++ b/src/ic/ia32/ic-ia32.cc
@@ -708,15 +708,6 @@
   __ TailCallRuntime(Runtime::kKeyedGetProperty);
 }
 
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // This shouldn't be called.
-  // TODO(mvstanton): remove this method.
-  __ int3();
-  return;
-}
-
-
 static void StoreIC_PushArgs(MacroAssembler* masm) {
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register name = StoreDescriptor::NameRegister();
@@ -836,8 +827,9 @@
   // condition code uses at the patched jump.
   uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, test=%p, delta=%d\n", address,
-           test_instruction_address, delta);
+    PrintF("[  patching ic at %p, test=%p, delta=%d\n",
+           static_cast<void*>(address),
+           static_cast<void*>(test_instruction_address), delta);
   }
 
   // Patch with a short conditional jump. Enabling means switching from a short
diff --git a/src/ic/ic-compiler.cc b/src/ic/ic-compiler.cc
index e89cb4b..af025fb 100644
--- a/src/ic/ic-compiler.cc
+++ b/src/ic/ic-compiler.cc
@@ -6,8 +6,6 @@
 
 #include "src/ic/handler-compiler.h"
 #include "src/ic/ic-inl.h"
-#include "src/profiler/cpu-profiler.h"
-
 
 namespace v8 {
 namespace internal {
@@ -15,6 +13,7 @@
 
 Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
     Handle<Map> receiver_map, ExtraICState extra_ic_state) {
+  // TODO(ishell): remove extra_ic_state
   Isolate* isolate = receiver_map->GetIsolate();
   bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
   ElementsKind elements_kind = receiver_map->elements_kind();
@@ -42,8 +41,7 @@
   } else {
     DCHECK(receiver_map->has_dictionary_elements());
     TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadDictionaryElementStub);
-    stub = LoadDictionaryElementStub(isolate, LoadICState(extra_ic_state))
-               .GetCode();
+    stub = LoadDictionaryElementStub(isolate).GetCode();
   }
   return stub;
 }
diff --git a/src/ic/ic-state.cc b/src/ic/ic-state.cc
index bf1e45f..d157c92 100644
--- a/src/ic/ic-state.cc
+++ b/src/ic/ic-state.cc
@@ -257,10 +257,10 @@
 
   if (old_extra_ic_state == GetExtraICState()) {
     // Tagged operations can lead to non-truncating HChanges
-    if (left->IsUndefined() || left->IsBoolean()) {
+    if (left->IsUndefined(isolate_) || left->IsBoolean()) {
       left_kind_ = GENERIC;
     } else {
-      DCHECK(right->IsUndefined() || right->IsBoolean());
+      DCHECK(right->IsUndefined(isolate_) || right->IsBoolean());
       right_kind_ = GENERIC;
     }
   }
@@ -274,7 +274,7 @@
   if (object->IsBoolean() && is_truncating) {
     // Booleans will be automatically truncated by HChange.
     new_kind = INT32;
-  } else if (object->IsUndefined()) {
+  } else if (object->IsUndefined(isolate_)) {
     // Undefined will be automatically truncated by HChange.
     new_kind = is_truncating ? INT32 : NUMBER;
   } else if (object->IsSmi()) {
@@ -446,8 +446,9 @@
 
 // static
 CompareICState::State CompareICState::TargetState(
-    State old_state, State old_left, State old_right, Token::Value op,
-    bool has_inlined_smi_code, Handle<Object> x, Handle<Object> y) {
+    Isolate* isolate, State old_state, State old_left, State old_right,
+    Token::Value op, bool has_inlined_smi_code, Handle<Object> x,
+    Handle<Object> y) {
   switch (old_state) {
     case UNINITIALIZED:
       if (x->IsBoolean() && y->IsBoolean()) return BOOLEAN;
@@ -456,8 +457,8 @@
       if (Token::IsOrderedRelationalCompareOp(op)) {
         // Ordered comparisons treat undefined as NaN, so the
         // NUMBER stub will do the right thing.
-        if ((x->IsNumber() && y->IsUndefined()) ||
-            (y->IsNumber() && x->IsUndefined())) {
+        if ((x->IsNumber() && y->IsUndefined(isolate)) ||
+            (y->IsNumber() && x->IsUndefined(isolate))) {
           return NUMBER;
         }
       }
diff --git a/src/ic/ic-state.h b/src/ic/ic-state.h
index e1d33f8..b12b17d 100644
--- a/src/ic/ic-state.h
+++ b/src/ic/ic-state.h
@@ -193,13 +193,13 @@
 
   static const char* GetStateName(CompareICState::State state);
 
-  static State TargetState(State old_state, State old_left, State old_right,
-                           Token::Value op, bool has_inlined_smi_code,
-                           Handle<Object> x, Handle<Object> y);
+  static State TargetState(Isolate* isolate, State old_state, State old_left,
+                           State old_right, Token::Value op,
+                           bool has_inlined_smi_code, Handle<Object> x,
+                           Handle<Object> y);
 };
 
-
-class LoadICState final BASE_EMBEDDED {
+class LoadGlobalICState final BASE_EMBEDDED {
  private:
   class TypeofModeBits : public BitField<TypeofMode, 0, 1> {};
   STATIC_ASSERT(static_cast<int>(INSIDE_TYPEOF) == 0);
@@ -208,9 +208,10 @@
  public:
   static const uint32_t kNextBitFieldOffset = TypeofModeBits::kNext;
 
-  explicit LoadICState(ExtraICState extra_ic_state) : state_(extra_ic_state) {}
+  explicit LoadGlobalICState(ExtraICState extra_ic_state)
+      : state_(extra_ic_state) {}
 
-  explicit LoadICState(TypeofMode typeof_mode)
+  explicit LoadGlobalICState(TypeofMode typeof_mode)
       : state_(TypeofModeBits::encode(typeof_mode)) {}
 
   ExtraICState GetExtraICState() const { return state_; }
@@ -218,7 +219,7 @@
   TypeofMode typeof_mode() const { return TypeofModeBits::decode(state_); }
 
   static TypeofMode GetTypeofMode(ExtraICState state) {
-    return LoadICState(state).typeof_mode();
+    return LoadGlobalICState(state).typeof_mode();
   }
 };
 
diff --git a/src/ic/ic.cc b/src/ic/ic.cc
index 2dcb8d9..5d20420 100644
--- a/src/ic/ic.cc
+++ b/src/ic/ic.cc
@@ -5,8 +5,8 @@
 #include "src/ic/ic.h"
 
 #include "src/accessors.h"
+#include "src/api-arguments-inl.h"
 #include "src/api.h"
-#include "src/api-arguments.h"
 #include "src/arguments.h"
 #include "src/base/bits.h"
 #include "src/codegen.h"
@@ -16,14 +16,14 @@
 #include "src/frames-inl.h"
 #include "src/ic/call-optimization.h"
 #include "src/ic/handler-compiler.h"
-#include "src/ic/ic-inl.h"
 #include "src/ic/ic-compiler.h"
+#include "src/ic/ic-inl.h"
 #include "src/ic/stub-cache.h"
 #include "src/isolate-inl.h"
 #include "src/macro-assembler.h"
 #include "src/prototype.h"
-#include "src/runtime/runtime.h"
 #include "src/runtime/runtime-utils.h"
+#include "src/runtime/runtime.h"
 #include "src/tracing/trace-event.h"
 
 namespace v8 {
@@ -45,12 +45,6 @@
       return 'N';
     case GENERIC:
       return 'G';
-
-    // We never see the debugger states here, because the state is
-    // computed from the original code - not the patched code. Let
-    // these cases fall through to the unreachable code below.
-    case DEBUG_STUB:
-      break;
   }
   UNREACHABLE();
   return 0;
@@ -126,14 +120,13 @@
           casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
       modifier = GetTransitionMarkModifier(mode);
     }
-    PrintF(" (%c->%c%s) ", TransitionMarkFromState(old_state),
-           TransitionMarkFromState(new_state), modifier);
-#ifdef OBJECT_PRINT
-    OFStream os(stdout);
-    name->Print(os);
-#else
+    void* map = nullptr;
+    if (!receiver_map().is_null()) {
+      map = reinterpret_cast<void*>(*receiver_map());
+    }
+    PrintF(" (%c->%c%s) map=%p ", TransitionMarkFromState(old_state),
+           TransitionMarkFromState(new_state), modifier, map);
     name->ShortPrint(stdout);
-#endif
     PrintF("]\n");
   }
 }
@@ -184,11 +177,32 @@
   pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
   Code* target = this->target();
   kind_ = target->kind();
-  state_ = UseVector() ? nexus->StateFromFeedback() : target->ic_state();
+  state_ = UseVector() ? nexus->StateFromFeedback() : StateFromCode(target);
   old_state_ = state_;
   extra_ic_state_ = target->extra_ic_state();
 }
 
+InlineCacheState IC::StateFromCode(Code* code) {
+  Isolate* isolate = code->GetIsolate();
+  switch (code->kind()) {
+    case Code::BINARY_OP_IC: {
+      BinaryOpICState state(isolate, code->extra_ic_state());
+      return state.GetICState();
+    }
+    case Code::COMPARE_IC: {
+      CompareICStub stub(isolate, code->extra_ic_state());
+      return stub.GetICState();
+    }
+    case Code::TO_BOOLEAN_IC: {
+      ToBooleanICStub stub(isolate, code->extra_ic_state());
+      return stub.GetICState();
+    }
+    default:
+      if (code->is_debug_stub()) return UNINITIALIZED;
+      UNREACHABLE();
+      return UNINITIALIZED;
+  }
+}
 
 SharedFunctionInfo* IC::GetSharedFunctionInfo() const {
   // Compute the JavaScript frame for the frame pointer of this IC
@@ -223,7 +237,6 @@
   return host->kind() == Code::OPTIMIZED_FUNCTION;
 }
 
-
 static void LookupForRead(LookupIterator* it) {
   for (; it->IsFound(); it->Next()) {
     switch (it->state()) {
@@ -235,7 +248,8 @@
       case LookupIterator::INTERCEPTOR: {
         // If there is a getter, return; otherwise loop to perform the lookup.
         Handle<JSObject> holder = it->GetHolder<JSObject>();
-        if (!holder->GetNamedInterceptor()->getter()->IsUndefined()) {
+        if (!holder->GetNamedInterceptor()->getter()->IsUndefined(
+                it->isolate())) {
           return;
         }
         break;
@@ -257,9 +271,14 @@
 
 bool IC::ShouldRecomputeHandler(Handle<Object> receiver, Handle<String> name) {
   if (!RecomputeHandlerForName(name)) return false;
+
   DCHECK(UseVector());
   maybe_handler_ = nexus()->FindHandlerForMap(receiver_map());
 
+  // This is a contextual access, always just update the handler and stay
+  // monomorphic.
+  if (kind() == Code::LOAD_GLOBAL_IC) return true;
+
   // The current map wasn't handled yet. There's no reason to stay monomorphic,
   // *unless* we're moving from a deprecated map to its replacement, or
   // to a more general elements kind.
@@ -275,15 +294,6 @@
                                                receiver_map()->elements_kind());
   }
 
-  if (receiver->IsJSGlobalObject()) {
-    Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(receiver);
-    LookupIterator it(global, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
-    if (it.state() == LookupIterator::ACCESS_CHECK) return false;
-    if (!it.IsFound()) return false;
-    if (!it.GetHolder<JSReceiver>()->IsJSGlobalObject()) return false;
-    return it.property_details().cell_type() == PropertyCellType::kConstant;
-  }
-
   return true;
 }
 
@@ -304,7 +314,7 @@
   update_receiver_map(receiver);
   if (!name->IsString()) return;
   if (state() != MONOMORPHIC && state() != POLYMORPHIC) return;
-  if (receiver->IsUndefined() || receiver->IsNull()) return;
+  if (receiver->IsUndefined(isolate()) || receiver->IsNull(isolate())) return;
 
   // Remove the target from the code cache if it became invalid
   // because of changes in the prototype chain to avoid hitting it
@@ -359,7 +369,6 @@
       }
       break;
     case RECOMPUTE_HANDLER:
-    case DEBUG_STUB:
       UNREACHABLE();
   }
 }
@@ -384,8 +393,8 @@
 
   DCHECK(old_target->is_inline_cache_stub());
   DCHECK(target->is_inline_cache_stub());
-  State old_state = old_target->ic_state();
-  State new_state = target->ic_state();
+  State old_state = StateFromCode(old_target);
+  State new_state = StateFromCode(target);
 
   Isolate* isolate = target->GetIsolate();
   Code* host =
@@ -455,6 +464,12 @@
   OnTypeFeedbackChanged(isolate, host);
 }
 
+void LoadGlobalIC::Clear(Isolate* isolate, Code* host,
+                         LoadGlobalICNexus* nexus) {
+  if (IsCleared(nexus)) return;
+  nexus->ConfigureUninitialized();
+  OnTypeFeedbackChanged(isolate, host);
+}
 
 void StoreIC::Clear(Isolate* isolate, Code* host, StoreICNexus* nexus) {
   if (IsCleared(nexus)) return;
@@ -486,8 +501,9 @@
 // static
 Handle<Code> KeyedLoadIC::ChooseMegamorphicStub(Isolate* isolate,
                                                 ExtraICState extra_state) {
+  // TODO(ishell): remove extra_ic_state
   if (FLAG_compiled_keyed_generic_loads) {
-    return KeyedLoadGenericStub(isolate, LoadICState(extra_state)).GetCode();
+    return KeyedLoadGenericStub(isolate).GetCode();
   } else {
     return isolate->builtins()->KeyedLoadIC_Megamorphic();
   }
@@ -532,6 +548,9 @@
   if (kind() == Code::LOAD_IC) {
     LoadICNexus* nexus = casted_nexus<LoadICNexus>();
     nexus->ConfigureMonomorphic(map, handler);
+  } else if (kind() == Code::LOAD_GLOBAL_IC) {
+    LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
+    nexus->ConfigureHandlerMode(handler);
   } else if (kind() == Code::KEYED_LOAD_IC) {
     KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
     nexus->ConfigureMonomorphic(name, map, handler);
@@ -588,63 +607,21 @@
 MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
   // If the object is undefined or null it's illegal to try to get any
   // of its properties; throw a TypeError in that case.
-  if (object->IsUndefined() || object->IsNull()) {
+  if (object->IsUndefined(isolate()) || object->IsNull(isolate())) {
     return TypeError(MessageTemplate::kNonObjectPropertyLoad, object, name);
   }
 
-  // Check if the name is trivially convertible to an index and get
-  // the element or char if so.
-  uint32_t index;
-  if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) {
-    // Rewrite to the generic keyed load stub.
-    if (FLAG_use_ic) {
-      DCHECK(UseVector());
-      ConfigureVectorState(MEGAMORPHIC, name);
-      TRACE_GENERIC_IC(isolate(), "LoadIC", "name as array index");
-      TRACE_IC("LoadIC", name);
-    }
-    Handle<Object> result;
-    ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
-                               Object::GetElement(isolate(), object, index),
-                               Object);
-    return result;
-  }
-
   bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
 
-  if (object->IsJSGlobalObject() && name->IsString()) {
-    // Look up in script context table.
-    Handle<String> str_name = Handle<String>::cast(name);
-    Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(object);
-    Handle<ScriptContextTable> script_contexts(
-        global->native_context()->script_context_table());
-
-    ScriptContextTable::LookupResult lookup_result;
-    if (ScriptContextTable::Lookup(script_contexts, str_name, &lookup_result)) {
-      Handle<Object> result =
-          FixedArray::get(*ScriptContextTable::GetContext(
-                              script_contexts, lookup_result.context_index),
-                          lookup_result.slot_index, isolate());
-      if (*result == *isolate()->factory()->the_hole_value()) {
-        // Do not install stubs and stay pre-monomorphic for
-        // uninitialized accesses.
-        return ReferenceError(name);
-      }
-
-      if (use_ic && LoadScriptContextFieldStub::Accepted(&lookup_result)) {
-        TRACE_HANDLER_STATS(isolate(), LoadIC_LoadScriptContextFieldStub);
-        LoadScriptContextFieldStub stub(isolate(), &lookup_result);
-        PatchCache(name, stub.GetCode());
-      }
-      return result;
-    }
+  if (state() != UNINITIALIZED) {
+    JSObject::MakePrototypesFast(object, kStartAtReceiver, isolate());
+    update_receiver_map(object);
   }
-
   // Named lookup in the object.
   LookupIterator it(object, name);
   LookupForRead(&it);
 
-  if (it.IsFound() || !ShouldThrowReferenceError(object)) {
+  if (it.IsFound() || !ShouldThrowReferenceError()) {
     // Update inline cache and stub cache.
     if (use_ic) UpdateCaches(&it);
 
@@ -655,7 +632,7 @@
                                Object);
     if (it.IsFound()) {
       return result;
-    } else if (!ShouldThrowReferenceError(object)) {
+    } else if (!ShouldThrowReferenceError()) {
       LOG(isolate(), SuspectReadEvent(*name, *object));
       return result;
     }
@@ -663,6 +640,38 @@
   return ReferenceError(name);
 }
 
+MaybeHandle<Object> LoadGlobalIC::Load(Handle<Name> name) {
+  Handle<JSGlobalObject> global = isolate()->global_object();
+
+  if (name->IsString()) {
+    // Look up in script context table.
+    Handle<String> str_name = Handle<String>::cast(name);
+    Handle<ScriptContextTable> script_contexts(
+        global->native_context()->script_context_table());
+
+    ScriptContextTable::LookupResult lookup_result;
+    if (ScriptContextTable::Lookup(script_contexts, str_name, &lookup_result)) {
+      Handle<Object> result =
+          FixedArray::get(*ScriptContextTable::GetContext(
+                              script_contexts, lookup_result.context_index),
+                          lookup_result.slot_index, isolate());
+      if (result->IsTheHole(isolate())) {
+        // Do not install stubs and stay pre-monomorphic for
+        // uninitialized accesses.
+        return ReferenceError(name);
+      }
+
+      if (FLAG_use_ic && LoadScriptContextFieldStub::Accepted(&lookup_result)) {
+        TRACE_HANDLER_STATS(isolate(), LoadIC_LoadScriptContextFieldStub);
+        LoadScriptContextFieldStub stub(isolate(), &lookup_result);
+        PatchCache(name, stub.GetCode());
+        TRACE_IC("LoadGlobalIC", name);
+      }
+      return result;
+    }
+  }
+  return LoadIC::Load(global, name);
+}
 
 static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
                                        Handle<Map> new_receiver_map) {
@@ -780,6 +789,11 @@
       break;
     case RECOMPUTE_HANDLER:
     case MONOMORPHIC:
+      if (kind() == Code::LOAD_GLOBAL_IC) {
+        UpdateMonomorphicIC(code, name);
+        break;
+      }
+    // Fall through.
     case POLYMORPHIC:
       if (!is_keyed() || state() == RECOMPUTE_HANDLER) {
         if (UpdatePolymorphicIC(name, code)) break;
@@ -796,35 +810,34 @@
       DCHECK(UseVector());
       vector_set_ = true;
       break;
-    case DEBUG_STUB:
-      break;
     case GENERIC:
       UNREACHABLE();
       break;
   }
 }
 
-Handle<Code> LoadIC::initialize_stub_in_optimized_code(
-    Isolate* isolate, ExtraICState extra_state, State initialization_state) {
-  return LoadICStub(isolate, LoadICState(extra_state)).GetCode();
+Handle<Code> LoadIC::initialize_stub_in_optimized_code(Isolate* isolate) {
+  if (FLAG_tf_load_ic_stub) {
+    return LoadICTFStub(isolate).GetCode();
+  }
+  return LoadICStub(isolate).GetCode();
+}
+
+Handle<Code> LoadGlobalIC::initialize_stub_in_optimized_code(
+    Isolate* isolate, ExtraICState extra_state) {
+  return LoadGlobalICStub(isolate, LoadGlobalICState(extra_state)).GetCode();
 }
 
 Handle<Code> KeyedLoadIC::initialize_stub_in_optimized_code(
-    Isolate* isolate, State initialization_state, ExtraICState extra_state) {
-  if (initialization_state != MEGAMORPHIC) {
-    return KeyedLoadICStub(isolate, LoadICState(extra_state)).GetCode();
-  }
-  return isolate->builtins()->KeyedLoadIC_Megamorphic();
+    Isolate* isolate, ExtraICState extra_state) {
+  // TODO(ishell): remove extra_ic_state
+  return KeyedLoadICStub(isolate).GetCode();
 }
 
-
 Handle<Code> KeyedStoreIC::initialize_stub_in_optimized_code(
-    Isolate* isolate, LanguageMode language_mode, State initialization_state) {
+    Isolate* isolate, LanguageMode language_mode) {
   StoreICState state = StoreICState(language_mode);
-  if (initialization_state != MEGAMORPHIC) {
-    return VectorKeyedStoreICStub(isolate, state).GetCode();
-  }
-  return ChooseMegamorphicStub(isolate, state.GetExtraICState());
+  return VectorKeyedStoreICStub(isolate, state).GetCode();
 }
 
 
@@ -884,7 +897,7 @@
 
 
 void LoadIC::UpdateCaches(LookupIterator* lookup) {
-  if (state() == UNINITIALIZED) {
+  if (state() == UNINITIALIZED && kind() != Code::LOAD_GLOBAL_IC) {
     // This is the first time we execute this inline cache. Set the target to
     // the pre monomorphic stub to delay setting the monomorphic state.
     ConfigureVectorState(PREMONOMORPHIC, Handle<Object>());
@@ -897,7 +910,7 @@
       lookup->state() == LookupIterator::ACCESS_CHECK) {
     code = slow_stub();
   } else if (!lookup->IsFound()) {
-    if (kind() == Code::LOAD_IC) {
+    if (kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC) {
       code = NamedLoadHandlerCompiler::ComputeLoadNonexistent(lookup->name(),
                                                               receiver_map());
       // TODO(jkummerow/verwaest): Introduce a builtin that handles this case.
@@ -906,21 +919,41 @@
       code = slow_stub();
     }
   } else {
-    if (lookup->state() == LookupIterator::ACCESSOR) {
+    if (kind() == Code::LOAD_GLOBAL_IC &&
+        lookup->state() == LookupIterator::DATA &&
+        lookup->GetHolder<Object>()->IsJSGlobalObject()) {
+#if DEBUG
+      Handle<Object> holder = lookup->GetHolder<Object>();
+      Handle<Object> receiver = lookup->GetReceiver();
+      DCHECK_EQ(*receiver, *holder);
+#endif
+      // Now update the cell in the feedback vector.
+      LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
+      nexus->ConfigurePropertyCellMode(lookup->GetPropertyCell());
+      TRACE_IC("LoadGlobalIC", lookup->name());
+      return;
+    } else if (lookup->state() == LookupIterator::ACCESSOR) {
       if (!IsCompatibleReceiver(lookup, receiver_map())) {
         TRACE_GENERIC_IC(isolate(), "LoadIC", "incompatible receiver type");
         code = slow_stub();
       }
     } else if (lookup->state() == LookupIterator::INTERCEPTOR) {
-      // Perform a lookup behind the interceptor. Copy the LookupIterator since
-      // the original iterator will be used to fetch the value.
-      LookupIterator it = *lookup;
-      it.Next();
-      LookupForRead(&it);
-      if (it.state() == LookupIterator::ACCESSOR &&
-          !IsCompatibleReceiver(&it, receiver_map())) {
-        TRACE_GENERIC_IC(isolate(), "LoadIC", "incompatible receiver type");
+      if (kind() == Code::LOAD_GLOBAL_IC) {
+        // The interceptor handler requires name but it is not passed explicitly
+        // to LoadGlobalIC and the LoadGlobalIC dispatcher also does not load
+        // it so we will just use slow stub.
         code = slow_stub();
+      } else {
+        // Perform a lookup behind the interceptor. Copy the LookupIterator
+        // since the original iterator will be used to fetch the value.
+        LookupIterator it = *lookup;
+        it.Next();
+        LookupForRead(&it);
+        if (it.state() == LookupIterator::ACCESSOR &&
+            !IsCompatibleReceiver(&it, receiver_map())) {
+          TRACE_GENERIC_IC(isolate(), "LoadIC", "incompatible receiver type");
+          code = slow_stub();
+        }
       }
     }
     if (code.is_null()) code = ComputeHandler(lookup);
@@ -947,7 +980,8 @@
       lookup->GetReceiver().is_identical_to(lookup->GetHolder<JSObject>());
   CacheHolderFlag flag;
   Handle<Map> stub_holder_map;
-  if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
+  if (kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC ||
+      kind() == Code::KEYED_LOAD_IC) {
     stub_holder_map = IC::GetHandlerCacheHolder(
         receiver_map(), receiver_is_holder, isolate(), &flag);
   } else {
@@ -1040,13 +1074,6 @@
         FieldIndex index = FieldIndex::ForInObjectOffset(object_offset, *map);
         return SimpleFieldLoad(index);
       }
-      if (Accessors::IsJSArrayBufferViewFieldAccessor(map, lookup->name(),
-                                                      &object_offset)) {
-        TRACE_HANDLER_STATS(isolate(), LoadIC_ArrayBufferViewLoadFieldStub);
-        FieldIndex index = FieldIndex::ForInObjectOffset(object_offset, *map);
-        ArrayBufferViewLoadFieldStub stub(isolate(), index);
-        return stub.GetCode();
-      }
 
       if (IsCompatibleReceiver(lookup, map)) {
         Handle<Object> accessors = lookup->GetAccessors();
@@ -1089,7 +1116,7 @@
 
     case LookupIterator::DATA: {
       if (lookup->is_dictionary_holder()) {
-        if (kind() != Code::LOAD_IC) {
+        if (kind() != Code::LOAD_IC && kind() != Code::LOAD_GLOBAL_IC) {
           TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
           return slow_stub();
         }
@@ -1166,7 +1193,7 @@
   Handle<Map> map = receiver_map();
   switch (lookup->state()) {
     case LookupIterator::INTERCEPTOR: {
-      DCHECK(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+      DCHECK(!holder->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
       TRACE_HANDLER_STATS(isolate(), LoadIC_LoadInterceptor);
       NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
       // Perform a lookup behind the interceptor. Copy the LookupIterator since
@@ -1182,8 +1209,6 @@
       int object_offset;
       DCHECK(!Accessors::IsJSObjectFieldAccessor(map, lookup->name(),
                                                  &object_offset));
-      DCHECK(!Accessors::IsJSArrayBufferViewFieldAccessor(map, lookup->name(),
-                                                          &object_offset));
 #endif
 
       DCHECK(IsCompatibleReceiver(lookup, map));
@@ -1200,7 +1225,6 @@
           int index = lookup->GetAccessorIndex();
           Handle<Code> code = compiler.CompileLoadCallback(
               lookup->name(), call_optimization, index);
-          if (FLAG_runtime_call_stats) return slow_stub();
           return code;
         }
         TRACE_HANDLER_STATS(isolate(), LoadIC_LoadViaGetter);
@@ -1220,7 +1244,6 @@
         TRACE_HANDLER_STATS(isolate(), LoadIC_LoadCallback);
         NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
         Handle<Code> code = compiler.CompileLoadCallback(lookup->name(), info);
-        if (FLAG_runtime_call_stats) return slow_stub();
         return code;
       }
       UNREACHABLE();
@@ -1228,7 +1251,7 @@
 
     case LookupIterator::DATA: {
       if (lookup->is_dictionary_holder()) {
-        DCHECK(kind() == Code::LOAD_IC);
+        DCHECK(kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC);
         DCHECK(holder->IsJSGlobalObject());
         TRACE_HANDLER_STATS(isolate(), LoadIC_LoadGlobal);
         NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
@@ -1281,7 +1304,7 @@
         key = handle(Smi::FromInt(int_value), isolate);
       }
     }
-  } else if (key->IsUndefined()) {
+  } else if (key->IsUndefined(isolate)) {
     key = isolate->factory()->undefined_string();
   }
   return key;
@@ -1373,7 +1396,10 @@
   // internalized string directly or is representable as a smi.
   key = TryConvertKey(key, isolate());
 
-  if (key->IsInternalizedString() || key->IsSymbol()) {
+  uint32_t index;
+  if ((key->IsInternalizedString() &&
+       !String::cast(*key)->AsArrayIndex(&index)) ||
+      key->IsSymbol()) {
     ASSIGN_RETURN_ON_EXCEPTION(isolate(), load_handle,
                                LoadIC::Load(object, Handle<Name>::cast(key)),
                                Object);
@@ -1421,9 +1447,9 @@
         InterceptorInfo* info = holder->GetNamedInterceptor();
         if (it->HolderIsReceiverOrHiddenPrototype()) {
           return !info->non_masking() && receiver.is_identical_to(holder) &&
-                 !info->setter()->IsUndefined();
-        } else if (!info->getter()->IsUndefined() ||
-                   !info->query()->IsUndefined()) {
+                 !info->setter()->IsUndefined(it->isolate());
+        } else if (!info->getter()->IsUndefined(it->isolate()) ||
+                   !info->query()->IsUndefined(it->isolate())) {
           return false;
         }
         break;
@@ -1472,24 +1498,6 @@
 MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
                                    Handle<Object> value,
                                    JSReceiver::StoreFromKeyed store_mode) {
-  // Check if the name is trivially convertible to an index and set the element.
-  uint32_t index;
-  if (kind() == Code::KEYED_STORE_IC && name->AsArrayIndex(&index)) {
-    // Rewrite to the generic keyed store stub.
-    if (FLAG_use_ic) {
-      DCHECK(UseVector());
-      ConfigureVectorState(MEGAMORPHIC, name);
-      TRACE_IC("StoreIC", name);
-      TRACE_GENERIC_IC(isolate(), "StoreIC", "name as array index");
-    }
-    Handle<Object> result;
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate(), result,
-        Object::SetElement(isolate(), object, index, value, language_mode()),
-        Object);
-    return result;
-  }
-
   if (object->IsJSGlobalObject() && name->IsString()) {
     // Look up in script context table.
     Handle<String> str_name = Handle<String>::cast(name);
@@ -1508,7 +1516,7 @@
       Handle<Object> previous_value =
           FixedArray::get(*script_context, lookup_result.slot_index, isolate());
 
-      if (*previous_value == *isolate()->factory()->the_hole_value()) {
+      if (previous_value->IsTheHole(isolate())) {
         // Do not install stubs and stay pre-monomorphic for
         // uninitialized accesses.
         return ReferenceError(name);
@@ -1538,10 +1546,13 @@
 
   // If the object is undefined or null it's illegal to try to set any
   // properties on it; throw a TypeError in that case.
-  if (object->IsUndefined() || object->IsNull()) {
+  if (object->IsUndefined(isolate()) || object->IsNull(isolate())) {
     return TypeError(MessageTemplate::kNonObjectPropertyStore, object, name);
   }
 
+  if (state() != UNINITIALIZED) {
+    JSObject::MakePrototypesFast(object, kStartAtPrototype, isolate());
+  }
   LookupIterator it(object, name);
   if (FLAG_use_ic) UpdateCaches(&it, value, store_mode);
 
@@ -1558,27 +1569,12 @@
   return code;
 }
 
-
 Handle<Code> StoreIC::initialize_stub_in_optimized_code(
-    Isolate* isolate, LanguageMode language_mode, State initialization_state) {
-  DCHECK(initialization_state == UNINITIALIZED ||
-         initialization_state == PREMONOMORPHIC ||
-         initialization_state == MEGAMORPHIC);
-  if (initialization_state != MEGAMORPHIC) {
-    VectorStoreICStub stub(isolate, StoreICState(language_mode));
-    return stub.GetCode();
-  }
-
-  return is_strict(language_mode)
-             ? isolate->builtins()->StoreIC_Megamorphic_Strict()
-             : isolate->builtins()->StoreIC_Megamorphic();
+    Isolate* isolate, LanguageMode language_mode) {
+  VectorStoreICStub stub(isolate, StoreICState(language_mode));
+  return stub.GetCode();
 }
 
-Handle<Code> StoreIC::slow_stub() const {
-  return isolate()->builtins()->StoreIC_Slow();
-}
-
-
 void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
                            JSReceiver::StoreFromKeyed store_mode) {
   if (state() == UNINITIALIZED) {
@@ -1641,7 +1637,7 @@
     }
 
     case LookupIterator::INTERCEPTOR: {
-      DCHECK(!holder->GetNamedInterceptor()->setter()->IsUndefined());
+      DCHECK(!holder->GetNamedInterceptor()->setter()->IsUndefined(isolate()));
       TRACE_HANDLER_STATS(isolate(), StoreIC_StoreInterceptorStub);
       StoreInterceptorStub stub(isolate());
       return stub.GetCode();
@@ -1797,7 +1793,6 @@
         NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
         Handle<Code> code = compiler.CompileStoreCallback(
             receiver, lookup->name(), info, language_mode());
-        if (FLAG_runtime_call_stats) return slow_stub();
         return code;
       } else {
         DCHECK(accessors->IsAccessorPair());
@@ -1812,7 +1807,6 @@
           Handle<Code> code = compiler.CompileStoreCallback(
               receiver, lookup->name(), call_optimization,
               lookup->GetAccessorIndex());
-          if (FLAG_runtime_call_stats) return slow_stub();
           return code;
         }
         TRACE_HANDLER_STATS(isolate(), StoreIC_StoreViaSetter);
@@ -2268,32 +2262,111 @@
   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
   HandleScope scope(isolate);
   Handle<Object> receiver = args.at<Object>(0);
-  Handle<Name> key = args.at<Name>(1);
-  Handle<Object> result;
 
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
   Handle<Smi> slot = args.at<Smi>(2);
   Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
   FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
   // A monomorphic or polymorphic KeyedLoadIC with a string key can call the
   // LoadIC miss handler if the handler misses. Since the vector Nexus is
   // set up outside the IC, handle that here.
-  if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::LOAD_IC) {
+  FeedbackVectorSlotKind kind = vector->GetKind(vector_slot);
+  if (kind == FeedbackVectorSlotKind::LOAD_IC) {
+    Handle<Name> key = args.at<Name>(1);
     LoadICNexus nexus(vector, vector_slot);
     LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
     ic.UpdateState(receiver, key);
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+    RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
+
+  } else if (kind == FeedbackVectorSlotKind::LOAD_GLOBAL_IC) {
+    Handle<Name> key(vector->GetName(vector_slot), isolate);
+    DCHECK_NE(*key, *isolate->factory()->empty_string());
+    DCHECK_EQ(*isolate->global_object(), *receiver);
+    LoadGlobalICNexus nexus(vector, vector_slot);
+    LoadGlobalIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+    ic.UpdateState(receiver, key);
+    RETURN_RESULT_OR_FAILURE(isolate, ic.Load(key));
+
   } else {
-    DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC,
-              vector->GetKind(vector_slot));
+    Handle<Name> key = args.at<Name>(1);
+    DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, kind);
     KeyedLoadICNexus nexus(vector, vector_slot);
     KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
     ic.UpdateState(receiver, key);
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+    RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
   }
+}
+
+// Used from ic-<arch>.cc.
+RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
+  TimerEventScope<TimerEventIcMiss> timer(isolate);
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  Handle<JSGlobalObject> global = isolate->global_object();
+  Handle<Smi> slot = args.at<Smi>(0);
+  Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(1);
+  FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+  DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
+            vector->GetKind(vector_slot));
+  Handle<String> name(vector->GetName(vector_slot), isolate);
+  DCHECK_NE(*name, *isolate->factory()->empty_string());
+
+  LoadGlobalICNexus nexus(vector, vector_slot);
+  LoadGlobalIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+  ic.UpdateState(global, name);
+
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(name));
   return *result;
 }
 
+RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_SMI_ARG_CHECKED(slot, 0);
+  CONVERT_ARG_HANDLE_CHECKED(TypeFeedbackVector, vector, 1);
+
+  FeedbackVectorSlot vector_slot = vector->ToSlot(slot);
+  DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
+            vector->GetKind(vector_slot));
+  Handle<String> name(vector->GetName(vector_slot), isolate);
+  DCHECK_NE(*name, *isolate->factory()->empty_string());
+
+  Handle<JSGlobalObject> global = isolate->global_object();
+
+  Handle<ScriptContextTable> script_contexts(
+      global->native_context()->script_context_table());
+
+  ScriptContextTable::LookupResult lookup_result;
+  if (ScriptContextTable::Lookup(script_contexts, name, &lookup_result)) {
+    Handle<Context> script_context = ScriptContextTable::GetContext(
+        script_contexts, lookup_result.context_index);
+    Handle<Object> result =
+        FixedArray::get(*script_context, lookup_result.slot_index, isolate);
+    if (*result == *isolate->factory()->the_hole_value()) {
+      THROW_NEW_ERROR_RETURN_FAILURE(
+          isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
+    }
+    return *result;
+  }
+
+  Handle<Object> result;
+  bool is_found = false;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      Runtime::GetObjectProperty(isolate, global, name, &is_found));
+  if (!is_found) {
+    LoadICNexus nexus(isolate);
+    LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+    // It is actually a LoadGlobalIC here, but the predicate handles this case
+    // properly.
+    if (ic.ShouldThrowReferenceError()) {
+      THROW_NEW_ERROR_RETURN_FAILURE(
+          isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
+    }
+  }
+  return *result;
+}
 
 // Used from ic-<arch>.cc
 RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
@@ -2302,7 +2375,6 @@
   HandleScope scope(isolate);
   Handle<Object> receiver = args.at<Object>(0);
   Handle<Object> key = args.at<Object>(1);
-  Handle<Object> result;
 
   DCHECK(args.length() == 4);
   Handle<Smi> slot = args.at<Smi>(2);
@@ -2311,8 +2383,7 @@
   KeyedLoadICNexus nexus(vector, vector_slot);
   KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
   ic.UpdateState(receiver, key);
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
 }
 
 
@@ -2322,7 +2393,6 @@
   HandleScope scope(isolate);
   Handle<Object> receiver = args.at<Object>(0);
   Handle<Object> key = args.at<Object>(1);
-  Handle<Object> result;
 
   DCHECK(args.length() == 4);
   Handle<Smi> slot = args.at<Smi>(2);
@@ -2331,9 +2401,7 @@
   KeyedLoadICNexus nexus(vector, vector_slot);
   KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
   ic.UpdateState(receiver, key);
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
-
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
 }
 
 
@@ -2345,7 +2413,6 @@
   Handle<Object> receiver = args.at<Object>(0);
   Handle<Name> key = args.at<Name>(1);
   Handle<Object> value = args.at<Object>(2);
-  Handle<Object> result;
 
   DCHECK(args.length() == 5 || args.length() == 6);
   Handle<Smi> slot = args.at<Smi>(3);
@@ -2355,18 +2422,15 @@
     StoreICNexus nexus(vector, vector_slot);
     StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
     ic.UpdateState(receiver, key);
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                       ic.Store(receiver, key, value));
+    RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
   } else {
     DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
               vector->GetKind(vector_slot));
     KeyedStoreICNexus nexus(vector, vector_slot);
     KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
     ic.UpdateState(receiver, key);
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                       ic.Store(receiver, key, value));
+    RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
   }
-  return *result;
 }
 
 
@@ -2377,7 +2441,6 @@
   Handle<Object> receiver = args.at<Object>(0);
   Handle<Name> key = args.at<Name>(1);
   Handle<Object> value = args.at<Object>(2);
-  Handle<Object> result;
 
   int length = args.length();
   DCHECK(length == 5 || length == 6);
@@ -2407,18 +2470,15 @@
     StoreICNexus nexus(vector, vector_slot);
     StoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
     ic.UpdateState(receiver, key);
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                       ic.Store(receiver, key, value));
+    RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
   } else {
     DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
               vector->GetKind(vector_slot));
     KeyedStoreICNexus nexus(vector, vector_slot);
     KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
     ic.UpdateState(receiver, key);
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                       ic.Store(receiver, key, value));
+    RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
   }
-  return *result;
 }
 
 
@@ -2430,7 +2490,6 @@
   Handle<Object> receiver = args.at<Object>(0);
   Handle<Object> key = args.at<Object>(1);
   Handle<Object> value = args.at<Object>(2);
-  Handle<Object> result;
 
   DCHECK(args.length() == 5);
   Handle<Smi> slot = args.at<Smi>(3);
@@ -2439,9 +2498,7 @@
   KeyedStoreICNexus nexus(vector, vector_slot);
   KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
   ic.UpdateState(receiver, key);
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     ic.Store(receiver, key, value));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
 }
 
 
@@ -2452,7 +2509,6 @@
   Handle<Object> receiver = args.at<Object>(0);
   Handle<Object> key = args.at<Object>(1);
   Handle<Object> value = args.at<Object>(2);
-  Handle<Object> result;
 
   DCHECK(args.length() == 5);
   Handle<Smi> slot = args.at<Smi>(3);
@@ -2461,9 +2517,7 @@
   KeyedStoreICNexus nexus(vector, vector_slot);
   KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
   ic.UpdateState(receiver, key);
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     ic.Store(receiver, key, value));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
 }
 
 
@@ -2477,11 +2531,9 @@
   StoreICNexus nexus(isolate);
   StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
   language_mode = ic.language_mode();
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
+  RETURN_RESULT_OR_FAILURE(
+      isolate,
       Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
-  return *result;
 }
 
 
@@ -2495,11 +2547,9 @@
   KeyedStoreICNexus nexus(isolate);
   KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
   language_mode = ic.language_mode();
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
+  RETURN_RESULT_OR_FAILURE(
+      isolate,
       Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
-  return *result;
 }
 
 
@@ -2522,11 +2572,9 @@
     JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
                                      map->elements_kind());
   }
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
+  RETURN_RESULT_OR_FAILURE(
+      isolate,
       Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
-  return *result;
 }
 
 
@@ -2535,27 +2583,6 @@
     Handle<Object> right) {
   BinaryOpICState state(isolate(), extra_ic_state());
 
-#ifdef V8_TARGET_ARCH_X64
-  // Crash instrumentation for crbug.com/621147.
-  uintptr_t left_raw = reinterpret_cast<uintptr_t>(*left);
-  uintptr_t hole_raw =
-      reinterpret_cast<uintptr_t>(isolate()->heap()->the_hole_value());
-  if ((hole_raw & ((1ull << 32) - 1)) == (left_raw & ((1ull << 32) - 1))) {
-    Code* c = GetCode();
-    Code::Kind kind = c->kind();
-    int instruction_size = c->instruction_size() + 2 * sizeof(Address);
-    byte* instructions = static_cast<byte*>(alloca(instruction_size));
-    Address* start = reinterpret_cast<Address*>(instructions);
-    start[0] = fp();
-    start[1] = pc();
-    for (int i = 2 * sizeof(Address); i < instruction_size; i++) {
-      instructions[i] = c->instruction_start()[i];
-    }
-    isolate()->PushStackTraceAndDie(0xBAAAAAAD, instructions, fp(),
-                                    static_cast<unsigned int>(kind));
-  }
-#endif  // V8_TARGET_ARCH_X64
-
   // Compute the actual result using the builtin for the binary operation.
   Handle<Object> result;
   switch (state.op()) {
@@ -2676,11 +2703,8 @@
   Handle<Object> left = args.at<Object>(BinaryOpICStub::kLeft);
   Handle<Object> right = args.at<Object>(BinaryOpICStub::kRight);
   BinaryOpIC ic(isolate);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      ic.Transition(Handle<AllocationSite>::null(), left, right));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, ic.Transition(Handle<AllocationSite>::null(), left, right));
 }
 
 
@@ -2695,10 +2719,8 @@
   Handle<Object> right =
       args.at<Object>(BinaryOpWithAllocationSiteStub::kRight);
   BinaryOpIC ic(isolate);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, ic.Transition(allocation_site, left, right));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           ic.Transition(allocation_site, left, right));
 }
 
 Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) {
@@ -2726,7 +2748,7 @@
   CompareICState::State new_right =
       CompareICState::NewInputState(old_stub.right(), y);
   CompareICState::State state = CompareICState::TargetState(
-      old_stub.state(), old_stub.left(), old_stub.right(), op_,
+      isolate(), old_stub.state(), old_stub.left(), old_stub.right(), op_,
       HasInlinedSmiCode(address()), x, y);
   CompareICStub stub(isolate(), op_, new_left, new_right, state);
   if (state == CompareICState::KNOWN_RECEIVER) {
@@ -2805,6 +2827,12 @@
   CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 5);
   HandleScope scope(isolate);
 
+  if (FLAG_runtime_call_stats) {
+    RETURN_RESULT_OR_FAILURE(
+        isolate, Runtime::SetObjectProperty(isolate, receiver, name, value,
+                                            language_mode));
+  }
+
   Handle<AccessorInfo> callback(
       callback_or_cell->IsWeakCell()
           ? AccessorInfo::cast(WeakCell::cast(*callback_or_cell)->value())
@@ -2910,15 +2938,15 @@
 
   if (it.IsFound()) return *result;
 
+#ifdef DEBUG
   LoadICNexus nexus(isolate);
   LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
-  if (!ic.ShouldThrowReferenceError(it.GetReceiver())) {
-    return isolate->heap()->undefined_value();
-  }
+  // It could actually be any kind of LoadIC here, but the predicate handles
+  // all the cases properly.
+  DCHECK(!ic.ShouldThrowReferenceError());
+#endif
 
-  // Throw a reference error.
-  THROW_NEW_ERROR_RETURN_FAILURE(
-      isolate, NewReferenceError(MessageTemplate::kNotDefined, it.name()));
+  return isolate->heap()->undefined_value();
 }
 
 
@@ -2996,7 +3024,6 @@
   HandleScope scope(isolate);
   Handle<Object> receiver = args.at<Object>(0);
   Handle<Name> key = args.at<Name>(1);
-  Handle<Object> result;
 
   DCHECK(args.length() == 4);
   Handle<Smi> slot = args.at<Smi>(2);
@@ -3009,17 +3036,15 @@
     LoadICNexus nexus(vector, vector_slot);
     LoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
     ic.UpdateState(receiver, key);
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+    RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
   } else {
     DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC,
               vector->GetKind(vector_slot));
     KeyedLoadICNexus nexus(vector, vector_slot);
     KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
     ic.UpdateState(receiver, key);
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+    RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
   }
-
-  return *result;
 }
 }  // namespace internal
 }  // namespace v8
diff --git a/src/ic/ic.h b/src/ic/ic.h
index 5dae179..8d45eab 100644
--- a/src/ic/ic.h
+++ b/src/ic/ic.h
@@ -47,7 +47,8 @@
 
 #ifdef DEBUG
   bool IsLoadStub() const {
-    return kind_ == Code::LOAD_IC || kind_ == Code::KEYED_LOAD_IC;
+    return kind_ == Code::LOAD_IC || kind_ == Code::LOAD_GLOBAL_IC ||
+           kind_ == Code::KEYED_LOAD_IC;
   }
   bool IsStoreStub() const {
     return kind_ == Code::STORE_IC || kind_ == Code::KEYED_STORE_IC;
@@ -63,22 +64,19 @@
                                              Isolate* isolate,
                                              CacheHolderFlag* flag);
 
-  static bool IsCleared(Code* code) {
-    InlineCacheState state = code->ic_state();
-    return !FLAG_use_ic || state == UNINITIALIZED || state == PREMONOMORPHIC;
-  }
-
   static bool IsCleared(FeedbackNexus* nexus) {
     InlineCacheState state = nexus->StateFromFeedback();
     return !FLAG_use_ic || state == UNINITIALIZED || state == PREMONOMORPHIC;
   }
 
   static bool ICUseVector(Code::Kind kind) {
-    return kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC ||
-           kind == Code::CALL_IC || kind == Code::STORE_IC ||
-           kind == Code::KEYED_STORE_IC;
+    return kind == Code::LOAD_IC || kind == Code::LOAD_GLOBAL_IC ||
+           kind == Code::KEYED_LOAD_IC || kind == Code::CALL_IC ||
+           kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC;
   }
 
+  static InlineCacheState StateFromCode(Code* code);
+
  protected:
   Address fp() const { return fp_; }
   Address pc() const { return *pc_address_; }
@@ -271,18 +269,16 @@
 
 class LoadIC : public IC {
  public:
-  TypeofMode typeof_mode() const {
-    return LoadICState::GetTypeofMode(extra_ic_state());
-  }
-
   LoadIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
       : IC(depth, isolate, nexus) {
     DCHECK(nexus != NULL);
     DCHECK(IsLoadStub());
   }
 
-  bool ShouldThrowReferenceError(Handle<Object> receiver) {
-    return receiver->IsJSGlobalObject() && typeof_mode() == NOT_INSIDE_TYPEOF;
+  bool ShouldThrowReferenceError() const {
+    return kind() == Code::LOAD_GLOBAL_IC &&
+           LoadGlobalICState::GetTypeofMode(extra_ic_state()) ==
+               NOT_INSIDE_TYPEOF;
   }
 
   // Code generator routines.
@@ -291,8 +287,7 @@
   static void GenerateRuntimeGetProperty(MacroAssembler* masm);
   static void GenerateNormal(MacroAssembler* masm);
 
-  static Handle<Code> initialize_stub_in_optimized_code(
-      Isolate* isolate, ExtraICState extra_state, State initialization_state);
+  static Handle<Code> initialize_stub_in_optimized_code(Isolate* isolate);
 
   MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
                                            Handle<Name> name);
@@ -300,7 +295,7 @@
   static void Clear(Isolate* isolate, Code* host, LoadICNexus* nexus);
 
  protected:
-  Handle<Code> slow_stub() const {
+  virtual Handle<Code> slow_stub() const {
     return isolate()->builtins()->LoadIC_Slow();
   }
 
@@ -319,6 +314,23 @@
   friend class IC;
 };
 
+class LoadGlobalIC : public LoadIC {
+ public:
+  LoadGlobalIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
+      : LoadIC(depth, isolate, nexus) {}
+
+  static Handle<Code> initialize_stub_in_optimized_code(
+      Isolate* isolate, ExtraICState extra_state);
+
+  MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Name> name);
+
+  static void Clear(Isolate* isolate, Code* host, LoadGlobalICNexus* nexus);
+
+ protected:
+  Handle<Code> slow_stub() const override {
+    return isolate()->builtins()->LoadGlobalIC_Slow();
+  }
+};
 
 class KeyedLoadIC : public LoadIC {
  public:
@@ -337,7 +349,7 @@
   static void GenerateMegamorphic(MacroAssembler* masm);
 
   static Handle<Code> initialize_stub_in_optimized_code(
-      Isolate* isolate, State initialization_state, ExtraICState extra_state);
+      Isolate* isolate, ExtraICState extra_state);
   static Handle<Code> ChooseMegamorphicStub(Isolate* isolate,
                                             ExtraICState extra_state);
 
@@ -366,13 +378,10 @@
   // Code generators for stub routines. Only called once at startup.
   static void GenerateSlow(MacroAssembler* masm);
   static void GenerateMiss(MacroAssembler* masm);
-  static void GenerateMegamorphic(MacroAssembler* masm);
   static void GenerateNormal(MacroAssembler* masm);
-  static void GenerateRuntimeSetProperty(MacroAssembler* masm,
-                                         LanguageMode language_mode);
 
   static Handle<Code> initialize_stub_in_optimized_code(
-      Isolate* isolate, LanguageMode language_mode, State initialization_state);
+      Isolate* isolate, LanguageMode language_mode);
 
   MUST_USE_RESULT MaybeHandle<Object> Store(
       Handle<Object> object, Handle<Name> name, Handle<Object> value,
@@ -386,7 +395,9 @@
 
  protected:
   // Stub accessors.
-  Handle<Code> slow_stub() const;
+  Handle<Code> slow_stub() const {
+    return isolate()->builtins()->StoreIC_Slow();
+  }
 
   // Update the inline cache and the global stub cache based on the
   // lookup result.
@@ -428,7 +439,7 @@
                                   LanguageMode language_mode);
 
   static Handle<Code> initialize_stub_in_optimized_code(
-      Isolate* isolate, LanguageMode language_mode, State initialization_state);
+      Isolate* isolate, LanguageMode language_mode);
   static Handle<Code> ChooseMegamorphicStub(Isolate* isolate,
                                             ExtraICState extra_state);
 
diff --git a/src/ic/mips/access-compiler-mips.cc b/src/ic/mips/access-compiler-mips.cc
index b122946..2aa0283 100644
--- a/src/ic/mips/access-compiler-mips.cc
+++ b/src/ic/mips/access-compiler-mips.cc
@@ -19,19 +19,19 @@
 
 
 Register* PropertyAccessCompiler::load_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3, scratch4.
+  // receiver, name, scratch1, scratch2, scratch3.
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, a3, a0, t0, t1};
+  static Register registers[] = {receiver, name, a3, a0, t0};
   return registers;
 }
 
 
 Register* PropertyAccessCompiler::store_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3.
+  // receiver, name, scratch1, scratch2.
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register name = StoreDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, a3, t0, t1};
+  static Register registers[] = {receiver, name, a3, t0};
   return registers;
 }
 
diff --git a/src/ic/mips/handler-compiler-mips.cc b/src/ic/mips/handler-compiler-mips.cc
index 847782e..66789e4 100644
--- a/src/ic/mips/handler-compiler-mips.cc
+++ b/src/ic/mips/handler-compiler-mips.cc
@@ -196,8 +196,9 @@
     MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
     Register scratch, Label* miss) {
   Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
-  DCHECK(cell->value()->IsTheHole());
-  Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
+  Isolate* isolate = masm->isolate();
+  DCHECK(cell->value()->IsTheHole(isolate));
+  Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
   __ LoadWeakValue(scratch, weak_cell, miss);
   __ lw(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
   __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -279,7 +280,7 @@
   Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
   bool call_data_undefined = false;
   // Put call data in place.
-  if (api_call_info->data()->IsUndefined()) {
+  if (api_call_info->data()->IsUndefined(isolate)) {
     call_data_undefined = true;
     __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
   } else {
@@ -423,28 +424,25 @@
   DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
          !scratch2.is(scratch1));
 
-  if (FLAG_eliminate_prototype_chain_checks) {
-    Handle<Cell> validity_cell =
-        Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
-    if (!validity_cell.is_null()) {
-      DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
-                validity_cell->value());
-      __ li(scratch1, Operand(validity_cell));
-      __ lw(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
-      __ Branch(miss, ne, scratch1,
-                Operand(Smi::FromInt(Map::kPrototypeChainValid)));
-    }
+  Handle<Cell> validity_cell =
+      Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+  if (!validity_cell.is_null()) {
+    DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
+    __ li(scratch1, Operand(validity_cell));
+    __ lw(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
+    __ Branch(miss, ne, scratch1,
+              Operand(Smi::FromInt(Map::kPrototypeChainValid)));
+  }
 
-    // The prototype chain of primitives (and their JSValue wrappers) depends
-    // on the native context, which can't be guarded by validity cells.
-    // |object_reg| holds the native context specific prototype in this case;
-    // we need to check its map.
-    if (check == CHECK_ALL_MAPS) {
-      __ lw(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
-      Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-      __ GetWeakValue(scratch2, cell);
-      __ Branch(miss, ne, scratch1, Operand(scratch2));
-    }
+  // The prototype chain of primitives (and their JSValue wrappers) depends
+  // on the native context, which can't be guarded by validity cells.
+  // |object_reg| holds the native context specific prototype in this case;
+  // we need to check its map.
+  if (check == CHECK_ALL_MAPS) {
+    __ lw(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+    __ GetWeakValue(scratch2, cell);
+    __ Branch(miss, ne, scratch1, Operand(scratch2));
   }
 
   // Keep track of the current object in register reg.
@@ -480,8 +478,10 @@
            !current_map->is_access_check_needed());
 
     prototype = handle(JSObject::cast(current_map->prototype()));
-    if (current_map->is_dictionary_map() &&
-        !current_map->IsJSGlobalObjectMap()) {
+    if (current_map->IsJSGlobalObjectMap()) {
+      GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+                                name, scratch2, miss);
+    } else if (current_map->is_dictionary_map()) {
       DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
       if (!name->IsUniqueName()) {
         DCHECK(name->IsString());
@@ -491,33 +491,12 @@
              current->property_dictionary()->FindEntry(name) ==
                  NameDictionary::kNotFound);
 
-      if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
+      if (depth > 1) {
         // TODO(jkummerow): Cache and re-use weak cell.
         __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
       }
       GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
                                        scratch2);
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-        __ lw(holder_reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-      }
-    } else {
-      Register map_reg = scratch1;
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
-      }
-      if (current_map->IsJSGlobalObjectMap()) {
-        GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
-                                  name, scratch2, miss);
-      } else if (!FLAG_eliminate_prototype_chain_checks &&
-                 (depth != 1 || check == CHECK_ALL_MAPS)) {
-        Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
-        __ GetWeakValue(scratch2, cell);
-        __ Branch(miss, ne, scratch2, Operand(map_reg));
-      }
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ lw(holder_reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
-      }
     }
 
     reg = holder_reg;  // From now on the object will be in holder_reg.
@@ -531,17 +510,8 @@
   // Log the check depth.
   LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
 
-  if (!FLAG_eliminate_prototype_chain_checks &&
-      (depth != 0 || check == CHECK_ALL_MAPS)) {
-    // Check the holder map.
-    __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-    Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
-    __ GetWeakValue(scratch2, cell);
-    __ Branch(miss, ne, scratch2, Operand(scratch1));
-  }
-
   bool return_holder = return_what == RETURN_HOLDER;
-  if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
+  if (return_holder && depth != 0) {
     __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
   }
 
@@ -587,7 +557,7 @@
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
-  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
 
   // Compile the interceptor call, followed by inline code to load the
   // property from further up the prototype chain if the call fails.
@@ -646,7 +616,7 @@
 void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
-  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
   PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
                            holder());
 
@@ -662,7 +632,7 @@
   __ Push(receiver(), holder_reg);  // Receiver.
   // If the callback cannot leak, then push the callback directly,
   // otherwise wrap it in a weak cell.
-  if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+  if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
     __ li(at, Operand(callback));
   } else {
     Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
diff --git a/src/ic/mips/ic-mips.cc b/src/ic/mips/ic-mips.cc
index 6c44918..f20262b 100644
--- a/src/ic/mips/ic-mips.cc
+++ b/src/ic/mips/ic-mips.cc
@@ -727,25 +727,6 @@
   __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
 }
 
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  DCHECK(receiver.is(a1));
-  DCHECK(name.is(a2));
-  DCHECK(StoreDescriptor::ValueRegister().is(a0));
-
-  // Get the receiver from the stack and probe the stub cache.
-  Code::Flags flags =
-      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, name, t1, t2, t3, t4);
-
-  // Cache miss: Jump to runtime.
-  GenerateMiss(masm);
-}
-
-
 void StoreIC::GenerateMiss(MacroAssembler* masm) {
   StoreIC_PushArgs(masm);
 
@@ -839,8 +820,9 @@
   }
 
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, andi=%p, delta=%d\n", address,
-           andi_instruction_address, delta);
+    PrintF("[  patching ic at %p, andi=%p, delta=%d\n",
+           static_cast<void*>(address),
+           static_cast<void*>(andi_instruction_address), delta);
   }
 
   Address patch_address =
diff --git a/src/ic/mips64/access-compiler-mips64.cc b/src/ic/mips64/access-compiler-mips64.cc
index 96e921c..bf6c73e 100644
--- a/src/ic/mips64/access-compiler-mips64.cc
+++ b/src/ic/mips64/access-compiler-mips64.cc
@@ -19,19 +19,19 @@
 
 
 Register* PropertyAccessCompiler::load_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3, scratch4.
+  // receiver, name, scratch1, scratch2, scratch3.
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, a3, a0, a4, a5};
+  static Register registers[] = {receiver, name, a3, a0, a4};
   return registers;
 }
 
 
 Register* PropertyAccessCompiler::store_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3.
+  // receiver, name, scratch1, scratch2.
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register name = StoreDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, a3, a4, a5};
+  static Register registers[] = {receiver, name, a3, a4};
   return registers;
 }
 
diff --git a/src/ic/mips64/handler-compiler-mips64.cc b/src/ic/mips64/handler-compiler-mips64.cc
index 81a9b3f..99ecbe2 100644
--- a/src/ic/mips64/handler-compiler-mips64.cc
+++ b/src/ic/mips64/handler-compiler-mips64.cc
@@ -196,8 +196,9 @@
     MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
     Register scratch, Label* miss) {
   Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
-  DCHECK(cell->value()->IsTheHole());
-  Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
+  Isolate* isolate = masm->isolate();
+  DCHECK(cell->value()->IsTheHole(isolate));
+  Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
   __ LoadWeakValue(scratch, weak_cell, miss);
   __ ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
   __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -279,7 +280,7 @@
   Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
   bool call_data_undefined = false;
   // Put call data in place.
-  if (api_call_info->data()->IsUndefined()) {
+  if (api_call_info->data()->IsUndefined(isolate)) {
     call_data_undefined = true;
     __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
   } else {
@@ -423,28 +424,25 @@
   DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
          !scratch2.is(scratch1));
 
-  if (FLAG_eliminate_prototype_chain_checks) {
-    Handle<Cell> validity_cell =
-        Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
-    if (!validity_cell.is_null()) {
-      DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
-                validity_cell->value());
-      __ li(scratch1, Operand(validity_cell));
-      __ ld(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
-      __ Branch(miss, ne, scratch1,
-                Operand(Smi::FromInt(Map::kPrototypeChainValid)));
-    }
+  Handle<Cell> validity_cell =
+      Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+  if (!validity_cell.is_null()) {
+    DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
+    __ li(scratch1, Operand(validity_cell));
+    __ ld(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
+    __ Branch(miss, ne, scratch1,
+              Operand(Smi::FromInt(Map::kPrototypeChainValid)));
+  }
 
-    // The prototype chain of primitives (and their JSValue wrappers) depends
-    // on the native context, which can't be guarded by validity cells.
-    // |object_reg| holds the native context specific prototype in this case;
-    // we need to check its map.
-    if (check == CHECK_ALL_MAPS) {
-      __ ld(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
-      Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-      __ GetWeakValue(scratch2, cell);
-      __ Branch(miss, ne, scratch1, Operand(scratch2));
-    }
+  // The prototype chain of primitives (and their JSValue wrappers) depends
+  // on the native context, which can't be guarded by validity cells.
+  // |object_reg| holds the native context specific prototype in this case;
+  // we need to check its map.
+  if (check == CHECK_ALL_MAPS) {
+    __ ld(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+    __ GetWeakValue(scratch2, cell);
+    __ Branch(miss, ne, scratch1, Operand(scratch2));
   }
 
   // Keep track of the current object in register reg.
@@ -480,8 +478,10 @@
            !current_map->is_access_check_needed());
 
     prototype = handle(JSObject::cast(current_map->prototype()));
-    if (current_map->is_dictionary_map() &&
-        !current_map->IsJSGlobalObjectMap()) {
+    if (current_map->IsJSGlobalObjectMap()) {
+      GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+                                name, scratch2, miss);
+    } else if (current_map->is_dictionary_map()) {
       DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
       if (!name->IsUniqueName()) {
         DCHECK(name->IsString());
@@ -491,33 +491,12 @@
              current->property_dictionary()->FindEntry(name) ==
                  NameDictionary::kNotFound);
 
-      if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
+      if (depth > 1) {
         // TODO(jkummerow): Cache and re-use weak cell.
         __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
       }
       GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
                                        scratch2);
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ ld(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-        __ ld(holder_reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-      }
-    } else {
-      Register map_reg = scratch1;
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
-      }
-      if (current_map->IsJSGlobalObjectMap()) {
-        GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
-                                  name, scratch2, miss);
-      } else if (!FLAG_eliminate_prototype_chain_checks &&
-                 (depth != 1 || check == CHECK_ALL_MAPS)) {
-        Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
-        __ GetWeakValue(scratch2, cell);
-        __ Branch(miss, ne, scratch2, Operand(map_reg));
-      }
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ ld(holder_reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
-      }
     }
 
     reg = holder_reg;  // From now on the object will be in holder_reg.
@@ -531,17 +510,8 @@
   // Log the check depth.
   LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
 
-  if (!FLAG_eliminate_prototype_chain_checks &&
-      (depth != 0 || check == CHECK_ALL_MAPS)) {
-    // Check the holder map.
-    __ ld(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-    Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
-    __ GetWeakValue(scratch2, cell);
-    __ Branch(miss, ne, scratch2, Operand(scratch1));
-  }
-
   bool return_holder = return_what == RETURN_HOLDER;
-  if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
+  if (return_holder && depth != 0) {
     __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
   }
 
@@ -587,7 +557,7 @@
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
-  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
 
   // Compile the interceptor call, followed by inline code to load the
   // property from further up the prototype chain if the call fails.
@@ -646,7 +616,7 @@
 void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
-  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
   PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
                            holder());
 
@@ -662,7 +632,7 @@
   __ Push(receiver(), holder_reg);  // Receiver.
   // If the callback cannot leak, then push the callback directly,
   // otherwise wrap it in a weak cell.
-  if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+  if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
     __ li(at, Operand(callback));
   } else {
     Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
diff --git a/src/ic/mips64/ic-mips64.cc b/src/ic/mips64/ic-mips64.cc
index 5193c85..81f1f92 100644
--- a/src/ic/mips64/ic-mips64.cc
+++ b/src/ic/mips64/ic-mips64.cc
@@ -731,25 +731,6 @@
   __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
 }
 
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  DCHECK(receiver.is(a1));
-  DCHECK(name.is(a2));
-  DCHECK(StoreDescriptor::ValueRegister().is(a0));
-
-  // Get the receiver from the stack and probe the stub cache.
-  Code::Flags flags =
-      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, name, a5, a6, a7, t0);
-
-  // Cache miss: Jump to runtime.
-  GenerateMiss(masm);
-}
-
-
 void StoreIC::GenerateMiss(MacroAssembler* masm) {
   StoreIC_PushArgs(masm);
 
@@ -841,8 +822,9 @@
   }
 
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, andi=%p, delta=%d\n", address,
-           andi_instruction_address, delta);
+    PrintF("[  patching ic at %p, andi=%p, delta=%d\n",
+           static_cast<void*>(address),
+           static_cast<void*>(andi_instruction_address), delta);
   }
 
   Address patch_address =
diff --git a/src/ic/ppc/OWNERS b/src/ic/ppc/OWNERS
index eb007cb..752e8e3 100644
--- a/src/ic/ppc/OWNERS
+++ b/src/ic/ppc/OWNERS
@@ -3,3 +3,4 @@
 joransiu@ca.ibm.com
 mbrandy@us.ibm.com
 michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/src/ic/ppc/access-compiler-ppc.cc b/src/ic/ppc/access-compiler-ppc.cc
index b1e06e1..6143b4c 100644
--- a/src/ic/ppc/access-compiler-ppc.cc
+++ b/src/ic/ppc/access-compiler-ppc.cc
@@ -19,19 +19,19 @@
 
 
 Register* PropertyAccessCompiler::load_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3, scratch4.
+  // receiver, name, scratch1, scratch2, scratch3.
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, r6, r3, r7, r8};
+  static Register registers[] = {receiver, name, r6, r3, r7};
   return registers;
 }
 
 
 Register* PropertyAccessCompiler::store_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3.
+  // receiver, name, scratch1, scratch2.
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register name = StoreDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, r6, r7, r8};
+  static Register registers[] = {receiver, name, r6, r7};
   return registers;
 }
 
diff --git a/src/ic/ppc/handler-compiler-ppc.cc b/src/ic/ppc/handler-compiler-ppc.cc
index 49af112..3293d82 100644
--- a/src/ic/ppc/handler-compiler-ppc.cc
+++ b/src/ic/ppc/handler-compiler-ppc.cc
@@ -199,8 +199,9 @@
     MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
     Register scratch, Label* miss) {
   Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
-  DCHECK(cell->value()->IsTheHole());
-  Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
+  Isolate* isolate = masm->isolate();
+  DCHECK(cell->value()->IsTheHole(isolate));
+  Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
   __ LoadWeakValue(scratch, weak_cell, miss);
   __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -285,7 +286,7 @@
   Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
   bool call_data_undefined = false;
   // Put call data in place.
-  if (api_call_info->data()->IsUndefined()) {
+  if (api_call_info->data()->IsUndefined(isolate)) {
     call_data_undefined = true;
     __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
   } else {
@@ -432,28 +433,25 @@
   DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
          !scratch2.is(scratch1));
 
-  if (FLAG_eliminate_prototype_chain_checks) {
-    Handle<Cell> validity_cell =
-        Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
-    if (!validity_cell.is_null()) {
-      DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
-                validity_cell->value());
-      __ mov(scratch1, Operand(validity_cell));
-      __ LoadP(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
-      __ CmpSmiLiteral(scratch1, Smi::FromInt(Map::kPrototypeChainValid), r0);
-      __ bne(miss);
-    }
+  Handle<Cell> validity_cell =
+      Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+  if (!validity_cell.is_null()) {
+    DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
+    __ mov(scratch1, Operand(validity_cell));
+    __ LoadP(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
+    __ CmpSmiLiteral(scratch1, Smi::FromInt(Map::kPrototypeChainValid), r0);
+    __ bne(miss);
+  }
 
-    // The prototype chain of primitives (and their JSValue wrappers) depends
-    // on the native context, which can't be guarded by validity cells.
-    // |object_reg| holds the native context specific prototype in this case;
-    // we need to check its map.
-    if (check == CHECK_ALL_MAPS) {
-      __ LoadP(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
-      Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-      __ CmpWeakValue(scratch1, cell, scratch2);
-      __ b(ne, miss);
-    }
+  // The prototype chain of primitives (and their JSValue wrappers) depends
+  // on the native context, which can't be guarded by validity cells.
+  // |object_reg| holds the native context specific prototype in this case;
+  // we need to check its map.
+  if (check == CHECK_ALL_MAPS) {
+    __ LoadP(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+    __ CmpWeakValue(scratch1, cell, scratch2);
+    __ b(ne, miss);
   }
 
   // Keep track of the current object in register reg.
@@ -488,8 +486,10 @@
            !current_map->is_access_check_needed());
 
     prototype = handle(JSObject::cast(current_map->prototype()));
-    if (current_map->is_dictionary_map() &&
-        !current_map->IsJSGlobalObjectMap()) {
+    if (current_map->IsJSGlobalObjectMap()) {
+      GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+                                name, scratch2, miss);
+    } else if (current_map->is_dictionary_map()) {
       DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
       if (!name->IsUniqueName()) {
         DCHECK(name->IsString());
@@ -499,33 +499,12 @@
              current->property_dictionary()->FindEntry(name) ==
                  NameDictionary::kNotFound);
 
-      if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
+      if (depth > 1) {
         // TODO(jkummerow): Cache and re-use weak cell.
         __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
       }
       GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
                                        scratch2);
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ LoadP(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-        __ LoadP(holder_reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-      }
-    } else {
-      Register map_reg = scratch1;
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
-      }
-      if (current_map->IsJSGlobalObjectMap()) {
-        GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
-                                  name, scratch2, miss);
-      } else if (!FLAG_eliminate_prototype_chain_checks &&
-                 (depth != 1 || check == CHECK_ALL_MAPS)) {
-        Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
-        __ CmpWeakValue(map_reg, cell, scratch2);
-        __ bne(miss);
-      }
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ LoadP(holder_reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
-      }
     }
 
     reg = holder_reg;  // From now on the object will be in holder_reg.
@@ -539,17 +518,8 @@
   // Log the check depth.
   LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
 
-  if (!FLAG_eliminate_prototype_chain_checks &&
-      (depth != 0 || check == CHECK_ALL_MAPS)) {
-    // Check the holder map.
-    __ LoadP(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-    Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
-    __ CmpWeakValue(scratch1, cell, scratch2);
-    __ bne(miss);
-  }
-
   bool return_holder = return_what == RETURN_HOLDER;
-  if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
+  if (return_holder && depth != 0) {
     __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
   }
 
@@ -595,7 +565,7 @@
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
-  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
 
   // Compile the interceptor call, followed by inline code to load the
   // property from further up the prototype chain if the call fails.
@@ -655,7 +625,7 @@
 void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
-  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
   PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
                            holder());
 
@@ -672,7 +642,7 @@
 
   // If the callback cannot leak, then push the callback directly,
   // otherwise wrap it in a weak cell.
-  if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+  if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
     __ mov(ip, Operand(callback));
   } else {
     Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
diff --git a/src/ic/ppc/ic-ppc.cc b/src/ic/ppc/ic-ppc.cc
index 3c86786..1f0236d 100644
--- a/src/ic/ppc/ic-ppc.cc
+++ b/src/ic/ppc/ic-ppc.cc
@@ -737,26 +737,6 @@
   GenerateMiss(masm);
 }
 
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  DCHECK(receiver.is(r4));
-  DCHECK(name.is(r5));
-  DCHECK(StoreDescriptor::ValueRegister().is(r3));
-
-  // Get the receiver from the stack and probe the stub cache.
-  Code::Flags flags =
-      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
-
-  masm->isolate()->stub_cache()->GenerateProbe(
-      masm, Code::STORE_IC, flags, receiver, name, r8, r9, r10, r11);
-
-  // Cache miss: Jump to runtime.
-  GenerateMiss(masm);
-}
-
-
 void StoreIC::GenerateMiss(MacroAssembler* masm) {
   StoreIC_PushArgs(masm);
 
@@ -851,8 +831,9 @@
   }
 
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n", address,
-           cmp_instruction_address, delta);
+    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n",
+           static_cast<void*>(address),
+           static_cast<void*>(cmp_instruction_address), delta);
   }
 
   Address patch_address =
diff --git a/src/ic/s390/OWNERS b/src/ic/s390/OWNERS
index eb007cb..752e8e3 100644
--- a/src/ic/s390/OWNERS
+++ b/src/ic/s390/OWNERS
@@ -3,3 +3,4 @@
 joransiu@ca.ibm.com
 mbrandy@us.ibm.com
 michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/src/ic/s390/access-compiler-s390.cc b/src/ic/s390/access-compiler-s390.cc
index 316be71..0a3285d 100644
--- a/src/ic/s390/access-compiler-s390.cc
+++ b/src/ic/s390/access-compiler-s390.cc
@@ -19,18 +19,18 @@
 }
 
 Register* PropertyAccessCompiler::load_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3, scratch4.
+  // receiver, name, scratch1, scratch2, scratch3.
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, r5, r2, r6, r7};
+  static Register registers[] = {receiver, name, r5, r2, r6};
   return registers;
 }
 
 Register* PropertyAccessCompiler::store_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3.
+  // receiver, name, scratch1, scratch2.
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register name = StoreDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, r5, r6, r7};
+  static Register registers[] = {receiver, name, r5, r6};
   return registers;
 }
 
diff --git a/src/ic/s390/handler-compiler-s390.cc b/src/ic/s390/handler-compiler-s390.cc
index f15a04d..b643a84 100644
--- a/src/ic/s390/handler-compiler-s390.cc
+++ b/src/ic/s390/handler-compiler-s390.cc
@@ -188,8 +188,9 @@
     MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
     Register scratch, Label* miss) {
   Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
-  DCHECK(cell->value()->IsTheHole());
-  Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
+  Isolate* isolate = masm->isolate();
+  DCHECK(cell->value()->IsTheHole(isolate));
+  Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
   __ LoadWeakValue(scratch, weak_cell, miss);
   __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
   __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
@@ -270,7 +271,7 @@
   Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
   bool call_data_undefined = false;
   // Put call data in place.
-  if (api_call_info->data()->IsUndefined()) {
+  if (api_call_info->data()->IsUndefined(isolate)) {
     call_data_undefined = true;
     __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
   } else {
@@ -406,28 +407,25 @@
   DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
          !scratch2.is(scratch1));
 
-  if (FLAG_eliminate_prototype_chain_checks) {
-    Handle<Cell> validity_cell =
-        Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
-    if (!validity_cell.is_null()) {
-      DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
-                validity_cell->value());
-      __ mov(scratch1, Operand(validity_cell));
-      __ LoadP(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
-      __ CmpSmiLiteral(scratch1, Smi::FromInt(Map::kPrototypeChainValid), r0);
-      __ bne(miss);
-    }
+  Handle<Cell> validity_cell =
+      Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+  if (!validity_cell.is_null()) {
+    DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
+    __ mov(scratch1, Operand(validity_cell));
+    __ LoadP(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
+    __ CmpSmiLiteral(scratch1, Smi::FromInt(Map::kPrototypeChainValid), r0);
+    __ bne(miss);
+  }
 
-    // The prototype chain of primitives (and their JSValue wrappers) depends
-    // on the native context, which can't be guarded by validity cells.
-    // |object_reg| holds the native context specific prototype in this case;
-    // we need to check its map.
-    if (check == CHECK_ALL_MAPS) {
-      __ LoadP(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
-      Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-      __ CmpWeakValue(scratch1, cell, scratch2);
-      __ b(ne, miss);
-    }
+  // The prototype chain of primitives (and their JSValue wrappers) depends
+  // on the native context, which can't be guarded by validity cells.
+  // |object_reg| holds the native context specific prototype in this case;
+  // we need to check its map.
+  if (check == CHECK_ALL_MAPS) {
+    __ LoadP(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+    __ CmpWeakValue(scratch1, cell, scratch2);
+    __ b(ne, miss);
   }
 
   // Keep track of the current object in register reg.
@@ -462,8 +460,10 @@
            !current_map->is_access_check_needed());
 
     prototype = handle(JSObject::cast(current_map->prototype()));
-    if (current_map->is_dictionary_map() &&
-        !current_map->IsJSGlobalObjectMap()) {
+    if (current_map->IsJSGlobalObjectMap()) {
+      GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+                                name, scratch2, miss);
+    } else if (current_map->is_dictionary_map()) {
       DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
       if (!name->IsUniqueName()) {
         DCHECK(name->IsString());
@@ -473,33 +473,12 @@
              current->property_dictionary()->FindEntry(name) ==
                  NameDictionary::kNotFound);
 
-      if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
+      if (depth > 1) {
         // TODO(jkummerow): Cache and re-use weak cell.
         __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
       }
       GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
                                        scratch2);
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ LoadP(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-        __ LoadP(holder_reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-      }
-    } else {
-      Register map_reg = scratch1;
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
-      }
-      if (current_map->IsJSGlobalObjectMap()) {
-        GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
-                                  name, scratch2, miss);
-      } else if (!FLAG_eliminate_prototype_chain_checks &&
-                 (depth != 1 || check == CHECK_ALL_MAPS)) {
-        Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
-        __ CmpWeakValue(map_reg, cell, scratch2);
-        __ bne(miss);
-      }
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ LoadP(holder_reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
-      }
     }
 
     reg = holder_reg;  // From now on the object will be in holder_reg.
@@ -513,17 +492,8 @@
   // Log the check depth.
   LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
 
-  if (!FLAG_eliminate_prototype_chain_checks &&
-      (depth != 0 || check == CHECK_ALL_MAPS)) {
-    // Check the holder map.
-    __ LoadP(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-    Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
-    __ CmpWeakValue(scratch1, cell, scratch2);
-    __ bne(miss);
-  }
-
   bool return_holder = return_what == RETURN_HOLDER;
-  if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
+  if (return_holder && depth != 0) {
     __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
   }
 
@@ -565,7 +535,7 @@
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
-  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
 
   // Compile the interceptor call, followed by inline code to load the
   // property from further up the prototype chain if the call fails.
@@ -623,7 +593,7 @@
 void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
-  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
   PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
                            holder());
 
@@ -639,7 +609,7 @@
 
   // If the callback cannot leak, then push the callback directly,
   // otherwise wrap it in a weak cell.
-  if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+  if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
     __ mov(ip, Operand(callback));
   } else {
     Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
diff --git a/src/ic/s390/ic-s390.cc b/src/ic/s390/ic-s390.cc
index bf9f8a1..64adc9f 100644
--- a/src/ic/s390/ic-s390.cc
+++ b/src/ic/s390/ic-s390.cc
@@ -719,24 +719,6 @@
   GenerateMiss(masm);
 }
 
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  DCHECK(receiver.is(r3));
-  DCHECK(name.is(r4));
-  DCHECK(StoreDescriptor::ValueRegister().is(r2));
-
-  // Get the receiver from the stack and probe the stub cache.
-  Code::Flags flags =
-      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
-
-  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, name, r7, r8, r9, ip);
-
-  // Cache miss: Jump to runtime.
-  GenerateMiss(masm);
-}
-
 void StoreIC::GenerateMiss(MacroAssembler* masm) {
   StoreIC_PushArgs(masm);
 
@@ -828,8 +810,9 @@
   }
 
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n", address,
-           cmp_instruction_address, delta);
+    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n",
+           static_cast<void*>(address),
+           static_cast<void*>(cmp_instruction_address), delta);
   }
 
   // Expected sequence to enable by changing the following
diff --git a/src/ic/stub-cache.cc b/src/ic/stub-cache.cc
index 5d71c1f..f51366f 100644
--- a/src/ic/stub-cache.cc
+++ b/src/ic/stub-cache.cc
@@ -34,7 +34,6 @@
   // cache only contains handlers. Make sure that the bits are the least
   // significant so they will be the ones masked out.
   DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(flags));
-  STATIC_ASSERT((Code::ICStateField::kMask & 1) == 1);
 
   // Make sure that the cache holder are not included in the hash.
   DCHECK(Code::ExtractCacheHolderFromFlags(flags) == 0);
@@ -75,12 +74,14 @@
   flags = CommonStubCacheChecks(name, map, flags);
   int primary_offset = PrimaryOffset(name, flags, map);
   Entry* primary = entry(primary_, primary_offset);
-  if (primary->key == name && primary->map == map) {
+  if (primary->key == name && primary->map == map &&
+      flags == Code::RemoveHolderFromFlags(primary->value->flags())) {
     return primary->value;
   }
   int secondary_offset = SecondaryOffset(name, flags, primary_offset);
   Entry* secondary = entry(secondary_, secondary_offset);
-  if (secondary->key == name && secondary->map == map) {
+  if (secondary->key == name && secondary->map == map &&
+      flags == Code::RemoveHolderFromFlags(secondary->value->flags())) {
     return secondary->value;
   }
   return NULL;
diff --git a/src/ic/stub-cache.h b/src/ic/stub-cache.h
index 4b27e6e..03afcdc 100644
--- a/src/ic/stub-cache.h
+++ b/src/ic/stub-cache.h
@@ -92,9 +92,24 @@
   // automatically discards the hash bit field.
   static const int kCacheIndexShift = Name::kHashShift;
 
- private:
+  static const int kPrimaryTableBits = 11;
+  static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
+  static const int kSecondaryTableBits = 9;
+  static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
+
+  static int PrimaryOffsetForTesting(Name* name, Code::Flags flags, Map* map) {
+    return PrimaryOffset(name, flags, map);
+  }
+
+  static int SecondaryOffsetForTesting(Name* name, Code::Flags flags,
+                                       int seed) {
+    return SecondaryOffset(name, flags, seed);
+  }
+
+  // The constructor is made public only for the purposes of testing.
   explicit StubCache(Isolate* isolate);
 
+ private:
   // The stub cache has a primary and secondary level.  The two levels have
   // different hashing algorithms in order to avoid simultaneous collisions
   // in both caches.  Unlike a probing strategy (quadratic or otherwise) the
@@ -150,11 +165,6 @@
                                     offset * multiplier);
   }
 
-  static const int kPrimaryTableBits = 11;
-  static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
-  static const int kSecondaryTableBits = 9;
-  static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
-
  private:
   Entry primary_[kPrimaryTableSize];
   Entry secondary_[kSecondaryTableSize];
diff --git a/src/ic/x64/access-compiler-x64.cc b/src/ic/x64/access-compiler-x64.cc
index b8d50b3..2b29252 100644
--- a/src/ic/x64/access-compiler-x64.cc
+++ b/src/ic/x64/access-compiler-x64.cc
@@ -19,19 +19,19 @@
 
 
 Register* PropertyAccessCompiler::load_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3, scratch4.
+  // receiver, name, scratch1, scratch2, scratch3.
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, rax, rbx, rdi, r8};
+  static Register registers[] = {receiver, name, rax, rbx, rdi};
   return registers;
 }
 
 
 Register* PropertyAccessCompiler::store_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3.
+  // receiver, name, scratch1, scratch2.
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register name = StoreDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, rbx, rdi, r8};
+  static Register registers[] = {receiver, name, rbx, rdi};
   return registers;
 }
 
diff --git a/src/ic/x64/handler-compiler-x64.cc b/src/ic/x64/handler-compiler-x64.cc
index 21d96ea..5c64288 100644
--- a/src/ic/x64/handler-compiler-x64.cc
+++ b/src/ic/x64/handler-compiler-x64.cc
@@ -180,7 +180,7 @@
   Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
   bool call_data_undefined = false;
   // Put call data in place.
-  if (api_call_info->data()->IsUndefined()) {
+  if (api_call_info->data()->IsUndefined(isolate)) {
     call_data_undefined = true;
     __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
   } else {
@@ -220,12 +220,12 @@
     MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
     Register scratch, Label* miss) {
   Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
-  DCHECK(cell->value()->IsTheHole());
-  Factory* factory = masm->isolate()->factory();
-  Handle<WeakCell> weak_cell = factory->NewWeakCell(cell);
+  Isolate* isolate = masm->isolate();
+  DCHECK(cell->value()->IsTheHole(isolate));
+  Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
   __ LoadWeakValue(scratch, weak_cell, miss);
   __ Cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
-         factory->the_hole_value());
+         isolate->factory()->the_hole_value());
   __ j(not_equal, miss);
 }
 
@@ -440,29 +440,26 @@
   DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
          !scratch2.is(scratch1));
 
-  if (FLAG_eliminate_prototype_chain_checks) {
-    Handle<Cell> validity_cell =
-        Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
-    if (!validity_cell.is_null()) {
-      DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
-                validity_cell->value());
-      __ Move(scratch1, validity_cell, RelocInfo::CELL);
-      // Move(..., CELL) loads the payload's address!
-      __ SmiCompare(Operand(scratch1, 0),
-                    Smi::FromInt(Map::kPrototypeChainValid));
-      __ j(not_equal, miss);
-    }
+  Handle<Cell> validity_cell =
+      Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+  if (!validity_cell.is_null()) {
+    DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
+    __ Move(scratch1, validity_cell, RelocInfo::CELL);
+    // Move(..., CELL) loads the payload's address!
+    __ SmiCompare(Operand(scratch1, 0),
+                  Smi::FromInt(Map::kPrototypeChainValid));
+    __ j(not_equal, miss);
+  }
 
-    // The prototype chain of primitives (and their JSValue wrappers) depends
-    // on the native context, which can't be guarded by validity cells.
-    // |object_reg| holds the native context specific prototype in this case;
-    // we need to check its map.
-    if (check == CHECK_ALL_MAPS) {
-      __ movp(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
-      Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-      __ CmpWeakValue(scratch1, cell, scratch2);
-      __ j(not_equal, miss);
-    }
+  // The prototype chain of primitives (and their JSValue wrappers) depends
+  // on the native context, which can't be guarded by validity cells.
+  // |object_reg| holds the native context specific prototype in this case;
+  // we need to check its map.
+  if (check == CHECK_ALL_MAPS) {
+    __ movp(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
+    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+    __ CmpWeakValue(scratch1, cell, scratch2);
+    __ j(not_equal, miss);
   }
 
   // Keep track of the current object in register reg.  On the first
@@ -500,8 +497,10 @@
            !current_map->is_access_check_needed());
 
     prototype = handle(JSObject::cast(current_map->prototype()));
-    if (current_map->is_dictionary_map() &&
-        !current_map->IsJSGlobalObjectMap()) {
+    if (current_map->IsJSGlobalObjectMap()) {
+      GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+                                name, scratch2, miss);
+    } else if (current_map->is_dictionary_map()) {
       DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
       if (!name->IsUniqueName()) {
         DCHECK(name->IsString());
@@ -511,34 +510,12 @@
              current->property_dictionary()->FindEntry(name) ==
                  NameDictionary::kNotFound);
 
-      if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
+      if (depth > 1) {
         // TODO(jkummerow): Cache and re-use weak cell.
         __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
       }
       GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
                                        scratch2);
-
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-        __ movp(holder_reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-      }
-    } else {
-      Register map_reg = scratch1;
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ movp(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
-      }
-      if (current_map->IsJSGlobalObjectMap()) {
-        GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
-                                  name, scratch2, miss);
-      } else if (!FLAG_eliminate_prototype_chain_checks &&
-                 (depth != 1 || check == CHECK_ALL_MAPS)) {
-        Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
-        __ CmpWeakValue(map_reg, cell, scratch2);
-        __ j(not_equal, miss);
-      }
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ movp(holder_reg, FieldOperand(map_reg, Map::kPrototypeOffset));
-      }
     }
 
     reg = holder_reg;  // From now on the object will be in holder_reg.
@@ -552,17 +529,8 @@
   // Log the check depth.
   LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
 
-  if (!FLAG_eliminate_prototype_chain_checks &&
-      (depth != 0 || check == CHECK_ALL_MAPS)) {
-    // Check the holder map.
-    __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-    Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
-    __ CmpWeakValue(scratch1, cell, scratch2);
-    __ j(not_equal, miss);
-  }
-
   bool return_holder = return_what == RETURN_HOLDER;
-  if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
+  if (return_holder && depth != 0) {
     __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
   }
 
@@ -607,7 +575,7 @@
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
-  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
 
   // Compile the interceptor call, followed by inline code to load the
   // property from further up the prototype chain if the call fails.
@@ -669,7 +637,7 @@
 void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
-  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
   __ PopReturnAddressTo(scratch2());
   PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
                            holder());
@@ -689,7 +657,7 @@
   __ Push(holder_reg);
   // If the callback cannot leak, then push the callback directly,
   // otherwise wrap it in a weak cell.
-  if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+  if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
     __ Push(callback);
   } else {
     Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
diff --git a/src/ic/x64/ic-x64.cc b/src/ic/x64/ic-x64.cc
index 4e9927d..da74582 100644
--- a/src/ic/x64/ic-x64.cc
+++ b/src/ic/x64/ic-x64.cc
@@ -451,7 +451,7 @@
   __ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow);
 
   __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, rbx, key, xmm0,
+  __ StoreNumberToDoubleElements(value, rbx, key, kScratchDoubleReg,
                                  &transition_double_elements);
   if (increment_length == kIncrementLength) {
     // Add 1 to receiver->length.
@@ -710,13 +710,6 @@
   __ TailCallRuntime(Runtime::kKeyedGetProperty);
 }
 
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // This shouldn't be called.
-  __ int3();
-}
-
-
 static void StoreIC_PushArgs(MacroAssembler* masm) {
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register name = StoreDescriptor::NameRegister();
@@ -829,8 +822,9 @@
   // condition code uses at the patched jump.
   uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, test=%p, delta=%d\n", address,
-           test_instruction_address, delta);
+    PrintF("[  patching ic at %p, test=%p, delta=%d\n",
+           static_cast<void*>(address),
+           static_cast<void*>(test_instruction_address), delta);
   }
 
   // Patch with a short conditional jump. Enabling means switching from a short
diff --git a/src/ic/x64/stub-cache-x64.cc b/src/ic/x64/stub-cache-x64.cc
index a65165b..fa0a0b3 100644
--- a/src/ic/x64/stub-cache-x64.cc
+++ b/src/ic/x64/stub-cache-x64.cc
@@ -106,7 +106,8 @@
   // the vector and slot registers, which need to be preserved for a handler
   // call or miss.
   if (IC::ICUseVector(ic_kind)) {
-    if (ic_kind == Code::LOAD_IC || ic_kind == Code::KEYED_LOAD_IC) {
+    if (ic_kind == Code::LOAD_IC || ic_kind == Code::LOAD_GLOBAL_IC ||
+        ic_kind == Code::KEYED_LOAD_IC) {
       Register vector = LoadWithVectorDescriptor::VectorRegister();
       Register slot = LoadDescriptor::SlotRegister();
       DCHECK(!AreAliased(vector, slot, scratch));
diff --git a/src/ic/x87/access-compiler-x87.cc b/src/ic/x87/access-compiler-x87.cc
index 2c1b942..e528de6 100644
--- a/src/ic/x87/access-compiler-x87.cc
+++ b/src/ic/x87/access-compiler-x87.cc
@@ -18,19 +18,19 @@
 
 
 Register* PropertyAccessCompiler::load_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3, scratch4.
+  // receiver, name, scratch1, scratch2, scratch3.
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, ebx, eax, edi, no_reg};
+  static Register registers[] = {receiver, name, ebx, eax, edi};
   return registers;
 }
 
 
 Register* PropertyAccessCompiler::store_calling_convention() {
-  // receiver, name, scratch1, scratch2, scratch3.
+  // receiver, name, scratch1, scratch2.
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register name = StoreDescriptor::NameRegister();
-  static Register registers[] = {receiver, name, ebx, edi, no_reg};
+  static Register registers[] = {receiver, name, ebx, edi};
   return registers;
 }
 
diff --git a/src/ic/x87/handler-compiler-x87.cc b/src/ic/x87/handler-compiler-x87.cc
index 7983273..c5f2d9f 100644
--- a/src/ic/x87/handler-compiler-x87.cc
+++ b/src/ic/x87/handler-compiler-x87.cc
@@ -199,7 +199,7 @@
   Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
   bool call_data_undefined = false;
   // Put call data in place.
-  if (api_call_info->data()->IsUndefined()) {
+  if (api_call_info->data()->IsUndefined(isolate)) {
     call_data_undefined = true;
     __ mov(data, Immediate(isolate->factory()->undefined_value()));
   } else {
@@ -237,12 +237,12 @@
     MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
     Register scratch, Label* miss) {
   Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
-  DCHECK(cell->value()->IsTheHole());
-  Factory* factory = masm->isolate()->factory();
-  Handle<WeakCell> weak_cell = factory->NewWeakCell(cell);
+  Isolate* isolate = masm->isolate();
+  DCHECK(cell->value()->IsTheHole(isolate));
+  Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
   __ LoadWeakValue(scratch, weak_cell, miss);
   __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
-         Immediate(factory->the_hole_value()));
+         Immediate(isolate->factory()->the_hole_value()));
   __ j(not_equal, miss);
 }
 
@@ -439,28 +439,25 @@
   DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
          !scratch2.is(scratch1));
 
-  if (FLAG_eliminate_prototype_chain_checks) {
-    Handle<Cell> validity_cell =
-        Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
-    if (!validity_cell.is_null()) {
-      DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
-                validity_cell->value());
-      // Operand::ForCell(...) points to the cell's payload!
-      __ cmp(Operand::ForCell(validity_cell),
-             Immediate(Smi::FromInt(Map::kPrototypeChainValid)));
-      __ j(not_equal, miss);
-    }
+  Handle<Cell> validity_cell =
+      Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+  if (!validity_cell.is_null()) {
+    DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
+    // Operand::ForCell(...) points to the cell's payload!
+    __ cmp(Operand::ForCell(validity_cell),
+           Immediate(Smi::FromInt(Map::kPrototypeChainValid)));
+    __ j(not_equal, miss);
+  }
 
-    // The prototype chain of primitives (and their JSValue wrappers) depends
-    // on the native context, which can't be guarded by validity cells.
-    // |object_reg| holds the native context specific prototype in this case;
-    // we need to check its map.
-    if (check == CHECK_ALL_MAPS) {
-      __ mov(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
-      Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-      __ CmpWeakValue(scratch1, cell, scratch2);
-      __ j(not_equal, miss);
-    }
+  // The prototype chain of primitives (and their JSValue wrappers) depends
+  // on the native context, which can't be guarded by validity cells.
+  // |object_reg| holds the native context specific prototype in this case;
+  // we need to check its map.
+  if (check == CHECK_ALL_MAPS) {
+    __ mov(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
+    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+    __ CmpWeakValue(scratch1, cell, scratch2);
+    __ j(not_equal, miss);
   }
 
   // Keep track of the current object in register reg.
@@ -496,8 +493,10 @@
            !current_map->is_access_check_needed());
 
     prototype = handle(JSObject::cast(current_map->prototype()));
-    if (current_map->is_dictionary_map() &&
-        !current_map->IsJSGlobalObjectMap()) {
+    if (current_map->IsJSGlobalObjectMap()) {
+      GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+                                name, scratch2, miss);
+    } else if (current_map->is_dictionary_map()) {
       DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
       if (!name->IsUniqueName()) {
         DCHECK(name->IsString());
@@ -507,34 +506,12 @@
              current->property_dictionary()->FindEntry(name) ==
                  NameDictionary::kNotFound);
 
-      if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
+      if (depth > 1) {
         // TODO(jkummerow): Cache and re-use weak cell.
         __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
       }
       GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
                                        scratch2);
-
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-        __ mov(holder_reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-      }
-    } else {
-      Register map_reg = scratch1;
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ mov(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
-      }
-      if (current_map->IsJSGlobalObjectMap()) {
-        GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
-                                  name, scratch2, miss);
-      } else if (!FLAG_eliminate_prototype_chain_checks &&
-                 (depth != 1 || check == CHECK_ALL_MAPS)) {
-        Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
-        __ CmpWeakValue(map_reg, cell, scratch2);
-        __ j(not_equal, miss);
-      }
-      if (!FLAG_eliminate_prototype_chain_checks) {
-        __ mov(holder_reg, FieldOperand(map_reg, Map::kPrototypeOffset));
-      }
     }
 
     reg = holder_reg;  // From now on the object will be in holder_reg.
@@ -548,17 +525,8 @@
   // Log the check depth.
   LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
 
-  if (!FLAG_eliminate_prototype_chain_checks &&
-      (depth != 0 || check == CHECK_ALL_MAPS)) {
-    // Check the holder map.
-    __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-    Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
-    __ CmpWeakValue(scratch1, cell, scratch2);
-    __ j(not_equal, miss);
-  }
-
   bool return_holder = return_what == RETURN_HOLDER;
-  if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
+  if (return_holder && depth != 0) {
     __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
   }
 
@@ -604,7 +572,7 @@
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
-  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
 
   // Compile the interceptor call, followed by inline code to load the
   // property from further up the prototype chain if the call fails.
@@ -671,7 +639,7 @@
 
 void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
-  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
   // Call the runtime system to load the interceptor.
   __ pop(scratch2());  // save old return address
   PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
@@ -692,7 +660,7 @@
   __ push(holder_reg);
   // If the callback cannot leak, then push the callback directly,
   // otherwise wrap it in a weak cell.
-  if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+  if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
     __ Push(callback);
   } else {
     Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
diff --git a/src/ic/x87/ic-x87.cc b/src/ic/x87/ic-x87.cc
index 9491954..9db5591 100644
--- a/src/ic/x87/ic-x87.cc
+++ b/src/ic/x87/ic-x87.cc
@@ -708,15 +708,6 @@
   __ TailCallRuntime(Runtime::kKeyedGetProperty);
 }
 
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // This shouldn't be called.
-  // TODO(mvstanton): remove this method.
-  __ int3();
-  return;
-}
-
-
 static void StoreIC_PushArgs(MacroAssembler* masm) {
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register name = StoreDescriptor::NameRegister();
@@ -836,8 +827,9 @@
   // condition code uses at the patched jump.
   uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, test=%p, delta=%d\n", address,
-           test_instruction_address, delta);
+    PrintF("[  patching ic at %p, test=%p, delta=%d\n",
+           static_cast<void*>(address),
+           static_cast<void*>(test_instruction_address), delta);
   }
 
   // Patch with a short conditional jump. Enabling means switching from a short
diff --git a/src/icu_util.cc b/src/icu_util.cc
index 0225130..a6f0453 100644
--- a/src/icu_util.cc
+++ b/src/icu_util.cc
@@ -15,6 +15,8 @@
 #include "unicode/putil.h"
 #include "unicode/udata.h"
 
+#include "src/base/file-utils.h"
+
 #define ICU_UTIL_DATA_FILE   0
 #define ICU_UTIL_DATA_SHARED 1
 #define ICU_UTIL_DATA_STATIC 2
@@ -38,6 +40,26 @@
 }  // namespace
 #endif
 
+bool InitializeICUDefaultLocation(const char* exec_path,
+                                  const char* icu_data_file) {
+#if !defined(V8_I18N_SUPPORT)
+  return true;
+#else
+#if ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE
+  if (icu_data_file) {
+    return InitializeICU(icu_data_file);
+  }
+  char* icu_data_file_default;
+  RelativePath(&icu_data_file_default, exec_path, "icudtl.dat");
+  bool result = InitializeICU(icu_data_file_default);
+  free(icu_data_file_default);
+  return result;
+#else
+  return InitializeICU(NULL);
+#endif
+#endif
+}
+
 bool InitializeICU(const char* icu_data_file) {
 #if !defined(V8_I18N_SUPPORT)
   return true;
diff --git a/src/icu_util.h b/src/icu_util.h
index c308dec..c5ef862 100644
--- a/src/icu_util.h
+++ b/src/icu_util.h
@@ -14,6 +14,11 @@
 // function should be called before ICU is used.
 bool InitializeICU(const char* icu_data_file);
 
+// Like above, but using the default icudtl.dat location if icu_data_file is
+// not specified.
+bool InitializeICUDefaultLocation(const char* exec_path,
+                                  const char* icu_data_file);
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/interface-descriptors.cc b/src/interface-descriptors.cc
index 860ad2a..50638f9 100644
--- a/src/interface-descriptors.cc
+++ b/src/interface-descriptors.cc
@@ -43,9 +43,8 @@
   return function;
 }
 
-
 void CallInterfaceDescriptorData::InitializePlatformSpecific(
-    int register_parameter_count, Register* registers,
+    int register_parameter_count, const Register* registers,
     PlatformInterfaceDescriptor* platform_descriptor) {
   platform_specific_descriptor_ = platform_descriptor;
   register_param_count_ = register_parameter_count;
@@ -98,6 +97,38 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+FunctionType* LoadGlobalDescriptor::BuildCallInterfaceDescriptorFunctionType(
+    Isolate* isolate, int paramater_count) {
+  Zone* zone = isolate->interface_descriptor_zone();
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 1, zone)->AsFunction();
+  function->InitParameter(0, SmiType(zone));
+  return function;
+}
+
+void LoadGlobalDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {LoadWithVectorDescriptor::SlotRegister()};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+FunctionType*
+LoadGlobalWithVectorDescriptor::BuildCallInterfaceDescriptorFunctionType(
+    Isolate* isolate, int paramater_count) {
+  Zone* zone = isolate->interface_descriptor_zone();
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 2, zone)->AsFunction();
+  function->InitParameter(0, SmiType(zone));
+  function->InitParameter(1, AnyTagged(zone));
+  return function;
+}
+
+void LoadGlobalWithVectorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {LoadWithVectorDescriptor::SlotRegister(),
+                          LoadWithVectorDescriptor::VectorRegister()};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void StoreDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -143,23 +174,6 @@
 }
 
 FunctionType*
-LoadGlobalViaContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int paramater_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), 1, zone)->AsFunction();
-  function->InitParameter(0, UntaggedIntegral32(zone));
-  return function;
-}
-
-
-void LoadGlobalViaContextDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {SlotRegister()};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-FunctionType*
 StoreGlobalViaContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
@@ -190,19 +204,12 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-void HasPropertyDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {KeyRegister(), ObjectRegister()};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
 void MathPowTaggedDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {exponent()};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
 void MathPowIntegerDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {exponent()};
@@ -309,6 +316,48 @@
   data->InitializePlatformSpecific(0, nullptr);
 }
 
+CallInterfaceDescriptor OnStackArgsDescriptorBase::ForArgs(
+    Isolate* isolate, int parameter_count) {
+  switch (parameter_count) {
+    case 1:
+      return OnStackWith1ArgsDescriptor(isolate);
+    case 2:
+      return OnStackWith2ArgsDescriptor(isolate);
+    case 3:
+      return OnStackWith3ArgsDescriptor(isolate);
+    case 4:
+      return OnStackWith4ArgsDescriptor(isolate);
+    case 5:
+      return OnStackWith5ArgsDescriptor(isolate);
+    case 6:
+      return OnStackWith6ArgsDescriptor(isolate);
+    case 7:
+      return OnStackWith7ArgsDescriptor(isolate);
+    default:
+      UNREACHABLE();
+      return VoidDescriptor(isolate);
+  }
+}
+
+FunctionType*
+OnStackArgsDescriptorBase::BuildCallInterfaceDescriptorFunctionTypeWithArg(
+    Isolate* isolate, int register_parameter_count, int parameter_count) {
+  DCHECK_EQ(0, register_parameter_count);
+  DCHECK_GT(parameter_count, 0);
+  Zone* zone = isolate->interface_descriptor_zone();
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), AnyTagged(zone), parameter_count, zone)
+          ->AsFunction();
+  for (int i = 0; i < parameter_count; i++) {
+    function->InitParameter(i, AnyTagged(zone));
+  }
+  return function;
+}
+
+void OnStackArgsDescriptorBase::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  data->InitializePlatformSpecific(0, nullptr);
+}
 
 void GrowArrayElementsDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -316,7 +365,8 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-FunctionType* FastArrayPushDescriptor::BuildCallInterfaceDescriptorFunctionType(
+FunctionType*
+VarArgFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
   FunctionType* function =
@@ -444,26 +494,29 @@
   return function;
 }
 
+FunctionType* ArraySingleArgumentConstructorDescriptor::
+    BuildCallInterfaceDescriptorFunctionType(Isolate* isolate,
+                                             int paramater_count) {
+  Zone* zone = isolate->interface_descriptor_zone();
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 5, zone)->AsFunction();
+  function->InitParameter(0, Type::Receiver());  // JSFunction
+  function->InitParameter(1, AnyTagged(zone));
+  function->InitParameter(2, UntaggedIntegral32(zone));
+  function->InitParameter(3, AnyTagged(zone));
+  function->InitParameter(4, AnyTagged(zone));
+  return function;
+}
+
 FunctionType*
-ArrayConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ArrayNArgumentsConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
   FunctionType* function =
       Type::Function(AnyTagged(zone), Type::Undefined(), 3, zone)->AsFunction();
   function->InitParameter(0, Type::Receiver());  // JSFunction
-  function->InitParameter(1, AnyTagged(zone));
-  function->InitParameter(2, UntaggedIntegral32(zone));
-  return function;
-}
-
-FunctionType*
-InternalArrayConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int paramater_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), 2, zone)->AsFunction();
-  function->InitParameter(0, Type::Receiver());  // JSFunction
-  function->InitParameter(1, UntaggedIntegral32(zone));
+  function->InitParameter(1, AnyTagged(zone));   // Allocation site or undefined
+  function->InitParameter(2, UntaggedIntegral32(zone));  // Arg count
   return function;
 }
 
diff --git a/src/interface-descriptors.h b/src/interface-descriptors.h
index 60d8723..18fc9a8 100644
--- a/src/interface-descriptors.h
+++ b/src/interface-descriptors.h
@@ -13,84 +13,90 @@
 
 class PlatformInterfaceDescriptor;
 
-#define INTERFACE_DESCRIPTOR_LIST(V)          \
-  V(Void)                                     \
-  V(Load)                                     \
-  V(Store)                                    \
-  V(StoreTransition)                          \
-  V(VectorStoreTransition)                    \
-  V(VectorStoreICTrampoline)                  \
-  V(VectorStoreIC)                            \
-  V(LoadWithVector)                           \
-  V(FastArrayPush)                            \
-  V(FastNewClosure)                           \
-  V(FastNewContext)                           \
-  V(FastNewObject)                            \
-  V(FastNewRestParameter)                     \
-  V(FastNewSloppyArguments)                   \
-  V(FastNewStrictArguments)                   \
-  V(TypeConversion)                           \
-  V(Typeof)                                   \
-  V(FastCloneRegExp)                          \
-  V(FastCloneShallowArray)                    \
-  V(FastCloneShallowObject)                   \
-  V(CreateAllocationSite)                     \
-  V(CreateWeakCell)                           \
-  V(CallFunction)                             \
-  V(CallFunctionWithFeedback)                 \
-  V(CallFunctionWithFeedbackAndVector)        \
-  V(CallConstruct)                            \
-  V(CallTrampoline)                           \
-  V(ConstructStub)                            \
-  V(ConstructTrampoline)                      \
-  V(RegExpConstructResult)                    \
-  V(TransitionElementsKind)                   \
-  V(AllocateHeapNumber)                       \
-  V(AllocateFloat32x4)                        \
-  V(AllocateInt32x4)                          \
-  V(AllocateUint32x4)                         \
-  V(AllocateBool32x4)                         \
-  V(AllocateInt16x8)                          \
-  V(AllocateUint16x8)                         \
-  V(AllocateBool16x8)                         \
-  V(AllocateInt8x16)                          \
-  V(AllocateUint8x16)                         \
-  V(AllocateBool8x16)                         \
-  V(ArrayNoArgumentConstructor)               \
-  V(ArrayConstructorConstantArgCount)         \
-  V(ArrayConstructor)                         \
-  V(InternalArrayConstructorConstantArgCount) \
-  V(InternalArrayConstructor)                 \
-  V(Compare)                                  \
-  V(BinaryOp)                                 \
-  V(BinaryOpWithAllocationSite)               \
-  V(CountOp)                                  \
-  V(StringAdd)                                \
-  V(StringCompare)                            \
-  V(Keyed)                                    \
-  V(Named)                                    \
-  V(HasProperty)                              \
-  V(CallHandler)                              \
-  V(ArgumentAdaptor)                          \
-  V(ApiCallbackWith0Args)                     \
-  V(ApiCallbackWith1Args)                     \
-  V(ApiCallbackWith2Args)                     \
-  V(ApiCallbackWith3Args)                     \
-  V(ApiCallbackWith4Args)                     \
-  V(ApiCallbackWith5Args)                     \
-  V(ApiCallbackWith6Args)                     \
-  V(ApiCallbackWith7Args)                     \
-  V(ApiGetter)                                \
-  V(LoadGlobalViaContext)                     \
-  V(StoreGlobalViaContext)                    \
-  V(MathPowTagged)                            \
-  V(MathPowInteger)                           \
-  V(ContextOnly)                              \
-  V(GrowArrayElements)                        \
-  V(InterpreterDispatch)                      \
-  V(InterpreterPushArgsAndCall)               \
-  V(InterpreterPushArgsAndConstruct)          \
-  V(InterpreterCEntry)                        \
+#define INTERFACE_DESCRIPTOR_LIST(V)   \
+  V(Void)                              \
+  V(ContextOnly)                       \
+  V(OnStackWith1Args)                  \
+  V(OnStackWith2Args)                  \
+  V(OnStackWith3Args)                  \
+  V(OnStackWith4Args)                  \
+  V(OnStackWith5Args)                  \
+  V(OnStackWith6Args)                  \
+  V(OnStackWith7Args)                  \
+  V(Load)                              \
+  V(LoadGlobal)                        \
+  V(LoadGlobalWithVector)              \
+  V(Store)                             \
+  V(StoreTransition)                   \
+  V(VectorStoreTransition)             \
+  V(VectorStoreICTrampoline)           \
+  V(VectorStoreIC)                     \
+  V(LoadWithVector)                    \
+  V(VarArgFunction)                    \
+  V(FastNewClosure)                    \
+  V(FastNewContext)                    \
+  V(FastNewObject)                     \
+  V(FastNewRestParameter)              \
+  V(FastNewSloppyArguments)            \
+  V(FastNewStrictArguments)            \
+  V(TypeConversion)                    \
+  V(Typeof)                            \
+  V(FastCloneRegExp)                   \
+  V(FastCloneShallowArray)             \
+  V(FastCloneShallowObject)            \
+  V(CreateAllocationSite)              \
+  V(CreateWeakCell)                    \
+  V(CallFunction)                      \
+  V(CallFunctionWithFeedback)          \
+  V(CallFunctionWithFeedbackAndVector) \
+  V(CallConstruct)                     \
+  V(CallTrampoline)                    \
+  V(ConstructStub)                     \
+  V(ConstructTrampoline)               \
+  V(RegExpConstructResult)             \
+  V(TransitionElementsKind)            \
+  V(AllocateHeapNumber)                \
+  V(AllocateFloat32x4)                 \
+  V(AllocateInt32x4)                   \
+  V(AllocateUint32x4)                  \
+  V(AllocateBool32x4)                  \
+  V(AllocateInt16x8)                   \
+  V(AllocateUint16x8)                  \
+  V(AllocateBool16x8)                  \
+  V(AllocateInt8x16)                   \
+  V(AllocateUint8x16)                  \
+  V(AllocateBool8x16)                  \
+  V(ArrayNoArgumentConstructor)        \
+  V(ArraySingleArgumentConstructor)    \
+  V(ArrayNArgumentsConstructor)        \
+  V(Compare)                           \
+  V(BinaryOp)                          \
+  V(BinaryOpWithAllocationSite)        \
+  V(CountOp)                           \
+  V(StringAdd)                         \
+  V(StringCompare)                     \
+  V(Keyed)                             \
+  V(Named)                             \
+  V(HasProperty)                       \
+  V(CallHandler)                       \
+  V(ArgumentAdaptor)                   \
+  V(ApiCallbackWith0Args)              \
+  V(ApiCallbackWith1Args)              \
+  V(ApiCallbackWith2Args)              \
+  V(ApiCallbackWith3Args)              \
+  V(ApiCallbackWith4Args)              \
+  V(ApiCallbackWith5Args)              \
+  V(ApiCallbackWith6Args)              \
+  V(ApiCallbackWith7Args)              \
+  V(ApiGetter)                         \
+  V(StoreGlobalViaContext)             \
+  V(MathPowTagged)                     \
+  V(MathPowInteger)                    \
+  V(GrowArrayElements)                 \
+  V(InterpreterDispatch)               \
+  V(InterpreterPushArgsAndCall)        \
+  V(InterpreterPushArgsAndConstruct)   \
+  V(InterpreterCEntry)                 \
   V(ResumeGenerator)
 
 class CallInterfaceDescriptorData {
@@ -110,7 +116,7 @@
   // and register side by side (eg, RegRep(r1, Representation::Tagged()).
   // The same should go for the CodeStubDescriptor class.
   void InitializePlatformSpecific(
-      int register_parameter_count, Register* registers,
+      int register_parameter_count, const Register* registers,
       PlatformInterfaceDescriptor* platform_descriptor = NULL);
 
   bool IsInitialized() const { return register_param_count_ >= 0; }
@@ -219,6 +225,12 @@
     }
   }
 
+  // Initializes |data| using the platform dependent default set of registers.
+  // It is intended to be used for TurboFan stubs when a particular set of
+  // registers does not matter.
+  static void DefaultInitializePlatformSpecific(
+      CallInterfaceDescriptorData* data, int register_parameter_count);
+
  private:
   const CallInterfaceDescriptorData* data_;
 };
@@ -230,6 +242,17 @@
   }                                                        \
   static inline CallDescriptors::Key key();
 
+#define DECLARE_DEFAULT_DESCRIPTOR(name, base, parameter_count)            \
+  DECLARE_DESCRIPTOR_WITH_BASE(name, base)                                 \
+ protected:                                                                \
+  void InitializePlatformSpecific(CallInterfaceDescriptorData* data)       \
+      override {                                                           \
+    DefaultInitializePlatformSpecific(data, parameter_count);              \
+  }                                                                        \
+  name(Isolate* isolate, CallDescriptors::Key key) : base(isolate, key) {} \
+                                                                           \
+ public:
+
 #define DECLARE_DESCRIPTOR(name, base)                                         \
   DECLARE_DESCRIPTOR_WITH_BASE(name, base)                                     \
  protected:                                                                    \
@@ -262,6 +285,77 @@
   DECLARE_DESCRIPTOR(VoidDescriptor, CallInterfaceDescriptor)
 };
 
+class ContextOnlyDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(ContextOnlyDescriptor, CallInterfaceDescriptor)
+};
+
+// The OnStackWith*ArgsDescriptors have a lot of boilerplate. The superclass
+// OnStackArgsDescriptorBase contains all the logic; it is not meant to be
+// instantiated directly and has no public constructors to ensure this is so.
+//
+// Use OnStackArgsDescriptorBase::ForArgs(isolate, parameter_count) to
+// instantiate a descriptor with the number of args.
+class OnStackArgsDescriptorBase : public CallInterfaceDescriptor {
+ public:
+  static CallInterfaceDescriptor ForArgs(Isolate* isolate, int parameter_count);
+
+ protected:
+  OnStackArgsDescriptorBase(Isolate* isolate, CallDescriptors::Key key)
+      : CallInterfaceDescriptor(isolate, key) {}
+  void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override;
+  FunctionType* BuildCallInterfaceDescriptorFunctionTypeWithArg(
+      Isolate* isolate, int register_parameter_count, int parameter_count);
+};
+
+class OnStackWith1ArgsDescriptor : public OnStackArgsDescriptorBase {
+ public:
+  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith1ArgsDescriptor,
+                                                     OnStackArgsDescriptorBase,
+                                                     1)
+};
+
+class OnStackWith2ArgsDescriptor : public OnStackArgsDescriptorBase {
+ public:
+  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith2ArgsDescriptor,
+                                                     OnStackArgsDescriptorBase,
+                                                     2)
+};
+
+class OnStackWith3ArgsDescriptor : public OnStackArgsDescriptorBase {
+ public:
+  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith3ArgsDescriptor,
+                                                     OnStackArgsDescriptorBase,
+                                                     3)
+};
+
+class OnStackWith4ArgsDescriptor : public OnStackArgsDescriptorBase {
+ public:
+  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith4ArgsDescriptor,
+                                                     OnStackArgsDescriptorBase,
+                                                     4)
+};
+
+class OnStackWith5ArgsDescriptor : public OnStackArgsDescriptorBase {
+ public:
+  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith5ArgsDescriptor,
+                                                     OnStackArgsDescriptorBase,
+                                                     5)
+};
+
+class OnStackWith6ArgsDescriptor : public OnStackArgsDescriptorBase {
+ public:
+  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith6ArgsDescriptor,
+                                                     OnStackArgsDescriptorBase,
+                                                     6)
+};
+
+class OnStackWith7ArgsDescriptor : public OnStackArgsDescriptorBase {
+ public:
+  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith7ArgsDescriptor,
+                                                     OnStackArgsDescriptorBase,
+                                                     7)
+};
 
 // LoadDescriptor is used by all stubs that implement Load/KeyedLoad ICs.
 class LoadDescriptor : public CallInterfaceDescriptor {
@@ -275,6 +369,17 @@
   static const Register SlotRegister();
 };
 
+class LoadGlobalDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadGlobalDescriptor,
+                                               CallInterfaceDescriptor)
+
+  enum ParameterIndices { kSlotIndex };
+
+  static const Register SlotRegister() {
+    return LoadDescriptor::SlotRegister();
+  }
+};
 
 class StoreDescriptor : public CallInterfaceDescriptor {
  public:
@@ -378,6 +483,17 @@
   static const Register VectorRegister();
 };
 
+class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
+ public:
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadGlobalWithVectorDescriptor,
+                                               LoadGlobalDescriptor)
+
+  enum ParameterIndices { kSlotIndex, kVectorIndex };
+
+  static const Register VectorRegister() {
+    return LoadWithVectorDescriptor::VectorRegister();
+  }
+};
 
 class FastNewClosureDescriptor : public CallInterfaceDescriptor {
  public:
@@ -425,10 +541,7 @@
  public:
   enum ParameterIndices { kKeyIndex, kObjectIndex };
 
-  DECLARE_DESCRIPTOR(HasPropertyDescriptor, CallInterfaceDescriptor)
-
-  static const Register KeyRegister();
-  static const Register ObjectRegister();
+  DECLARE_DEFAULT_DESCRIPTOR(HasPropertyDescriptor, CallInterfaceDescriptor, 2)
 };
 
 class TypeofDescriptor : public CallInterfaceDescriptor {
@@ -532,15 +645,6 @@
 };
 
 
-class LoadGlobalViaContextDescriptor : public CallInterfaceDescriptor {
- public:
-  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadGlobalViaContextDescriptor,
-                                               CallInterfaceDescriptor)
-
-  static const Register SlotRegister();
-};
-
-
 class StoreGlobalViaContextDescriptor : public CallInterfaceDescriptor {
  public:
   DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreGlobalViaContextDescriptor,
@@ -578,37 +682,36 @@
     kFunctionIndex,
     kAllocationSiteIndex,
     kArgumentCountIndex,
+    kFunctionParameterIndex,
     kContextIndex
   };
 };
 
-class ArrayConstructorConstantArgCountDescriptor
+class ArraySingleArgumentConstructorDescriptor
     : public CallInterfaceDescriptor {
  public:
-  DECLARE_DESCRIPTOR(ArrayConstructorConstantArgCountDescriptor,
-                     CallInterfaceDescriptor)
-};
-
-
-class ArrayConstructorDescriptor : public CallInterfaceDescriptor {
- public:
-  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ArrayConstructorDescriptor,
-                                               CallInterfaceDescriptor)
-};
-
-
-class InternalArrayConstructorConstantArgCountDescriptor
-    : public CallInterfaceDescriptor {
- public:
-  DECLARE_DESCRIPTOR(InternalArrayConstructorConstantArgCountDescriptor,
-                     CallInterfaceDescriptor)
-};
-
-
-class InternalArrayConstructorDescriptor : public CallInterfaceDescriptor {
- public:
   DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
-      InternalArrayConstructorDescriptor, CallInterfaceDescriptor)
+      ArraySingleArgumentConstructorDescriptor, CallInterfaceDescriptor)
+  enum ParameterIndices {
+    kFunctionIndex,
+    kAllocationSiteIndex,
+    kArgumentCountIndex,
+    kFunctionParameterIndex,
+    kArraySizeSmiParameterIndex,
+    kContextIndex
+  };
+};
+
+class ArrayNArgumentsConstructorDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
+      ArrayNArgumentsConstructorDescriptor, CallInterfaceDescriptor)
+  enum ParameterIndices {
+    kFunctionIndex,
+    kAllocationSiteIndex,
+    kArgumentCountIndex,
+    kContextIndex
+  };
 };
 
 
@@ -756,7 +859,6 @@
   static const Register CallbackRegister();
 };
 
-
 class MathPowTaggedDescriptor : public CallInterfaceDescriptor {
  public:
   DECLARE_DESCRIPTOR(MathPowTaggedDescriptor, CallInterfaceDescriptor)
@@ -764,7 +866,6 @@
   static const Register exponent();
 };
 
-
 class MathPowIntegerDescriptor : public CallInterfaceDescriptor {
  public:
   DECLARE_DESCRIPTOR(MathPowIntegerDescriptor, CallInterfaceDescriptor)
@@ -772,15 +873,9 @@
   static const Register exponent();
 };
 
-
-class ContextOnlyDescriptor : public CallInterfaceDescriptor {
+class VarArgFunctionDescriptor : public CallInterfaceDescriptor {
  public:
-  DECLARE_DESCRIPTOR(ContextOnlyDescriptor, CallInterfaceDescriptor)
-};
-
-class FastArrayPushDescriptor : public CallInterfaceDescriptor {
- public:
-  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(FastArrayPushDescriptor,
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(VarArgFunctionDescriptor,
                                                CallInterfaceDescriptor)
 };
 
diff --git a/src/interpreter/bytecode-array-builder.cc b/src/interpreter/bytecode-array-builder.cc
index 75bf631..c74fe7e 100644
--- a/src/interpreter/bytecode-array-builder.cc
+++ b/src/interpreter/bytecode-array-builder.cc
@@ -6,7 +6,10 @@
 
 #include "src/compiler.h"
 #include "src/interpreter/bytecode-array-writer.h"
+#include "src/interpreter/bytecode-dead-code-optimizer.h"
+#include "src/interpreter/bytecode-label.h"
 #include "src/interpreter/bytecode-peephole-optimizer.h"
+#include "src/interpreter/bytecode-register-optimizer.h"
 #include "src/interpreter/interpreter-intrinsics.h"
 
 namespace v8 {
@@ -22,29 +25,34 @@
       bytecode_generated_(false),
       constant_array_builder_(isolate, zone),
       handler_table_builder_(isolate, zone),
-      source_position_table_builder_(isolate, zone),
-      exit_seen_in_block_(false),
-      unbound_jumps_(0),
+      return_seen_in_block_(false),
       parameter_count_(parameter_count),
       local_register_count_(locals_count),
       context_register_count_(context_count),
       temporary_allocator_(zone, fixed_register_count()),
-      bytecode_array_writer_(zone, &source_position_table_builder_),
+      bytecode_array_writer_(isolate, zone, &constant_array_builder_),
       pipeline_(&bytecode_array_writer_) {
   DCHECK_GE(parameter_count_, 0);
   DCHECK_GE(context_register_count_, 0);
   DCHECK_GE(local_register_count_, 0);
 
+  if (FLAG_ignition_deadcode) {
+    pipeline_ = new (zone) BytecodeDeadCodeOptimizer(pipeline_);
+  }
+
   if (FLAG_ignition_peephole) {
     pipeline_ = new (zone)
         BytecodePeepholeOptimizer(&constant_array_builder_, pipeline_);
   }
 
+  if (FLAG_ignition_reo) {
+    pipeline_ = new (zone) BytecodeRegisterOptimizer(
+        zone, &temporary_allocator_, parameter_count, pipeline_);
+  }
+
   return_position_ =
       literal ? std::max(literal->start_position(), literal->end_position() - 1)
               : RelocInfo::kNoPosition;
-  LOG_CODE_EVENT(isolate_, CodeStartLinePosInfoRecordEvent(
-                               source_position_table_builder()));
 }
 
 Register BytecodeArrayBuilder::first_context_register() const {
@@ -52,132 +60,98 @@
   return Register(local_register_count_);
 }
 
-
 Register BytecodeArrayBuilder::last_context_register() const {
   DCHECK_GT(context_register_count_, 0);
   return Register(local_register_count_ + context_register_count_ - 1);
 }
 
-
 Register BytecodeArrayBuilder::Parameter(int parameter_index) const {
   DCHECK_GE(parameter_index, 0);
   return Register::FromParameterIndex(parameter_index, parameter_count());
 }
 
-
 bool BytecodeArrayBuilder::RegisterIsParameterOrLocal(Register reg) const {
   return reg.is_parameter() || reg.index() < locals_count();
 }
 
-
 Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray() {
-  DCHECK_EQ(0, unbound_jumps_);
-  DCHECK_EQ(bytecode_generated_, false);
-  DCHECK(exit_seen_in_block_);
-
-  pipeline()->FlushBasicBlock();
-  const ZoneVector<uint8_t>* bytecodes = bytecode_array_writer()->bytecodes();
-
-  int bytecode_size = static_cast<int>(bytecodes->size());
-
-  // All locals need a frame slot for the debugger, but may not be
-  // present in generated code.
-  int frame_size_for_locals = fixed_register_count() * kPointerSize;
-  int frame_size_used = bytecode_array_writer()->GetMaximumFrameSizeUsed();
-  int frame_size = std::max(frame_size_for_locals, frame_size_used);
-  Handle<FixedArray> constant_pool = constant_array_builder()->ToFixedArray();
-  Handle<FixedArray> handler_table = handler_table_builder()->ToHandlerTable();
-  Handle<ByteArray> source_position_table =
-      source_position_table_builder()->ToSourcePositionTable();
-  Handle<BytecodeArray> bytecode_array = isolate_->factory()->NewBytecodeArray(
-      bytecode_size, &bytecodes->front(), frame_size, parameter_count(),
-      constant_pool);
-  bytecode_array->set_handler_table(*handler_table);
-  bytecode_array->set_source_position_table(*source_position_table);
-
-  void* line_info = source_position_table_builder()->DetachJITHandlerData();
-  LOG_CODE_EVENT(isolate_, CodeEndLinePosInfoRecordEvent(
-                               AbstractCode::cast(*bytecode_array), line_info));
-
+  DCHECK(return_seen_in_block_);
+  DCHECK(!bytecode_generated_);
   bytecode_generated_ = true;
-  return bytecode_array;
+
+  Handle<FixedArray> handler_table = handler_table_builder()->ToHandlerTable();
+  return pipeline_->ToBytecodeArray(fixed_register_count(), parameter_count(),
+                                    handler_table);
 }
 
+namespace {
+
+static bool ExpressionPositionIsNeeded(Bytecode bytecode) {
+  // An expression position is always needed if filtering is turned
+  // off. Otherwise an expression is only needed if the bytecode has
+  // external side effects.
+  return !FLAG_ignition_filter_expression_positions ||
+         !Bytecodes::IsWithoutExternalSideEffects(bytecode);
+}
+
+}  // namespace
+
 void BytecodeArrayBuilder::AttachSourceInfo(BytecodeNode* node) {
   if (latest_source_info_.is_valid()) {
-    node->source_info().Update(latest_source_info_);
-    latest_source_info_.set_invalid();
+    // Statement positions need to be emitted immediately.  Expression
+    // positions can be pushed back until a bytecode is found that can
+    // throw. Hence we only invalidate the existing source position
+    // information if it is used.
+    if (latest_source_info_.is_statement() ||
+        ExpressionPositionIsNeeded(node->bytecode())) {
+      node->source_info().Clone(latest_source_info_);
+      latest_source_info_.set_invalid();
+    }
   }
 }
 
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
+                                  uint32_t operand1, uint32_t operand2,
+                                  uint32_t operand3) {
+  DCHECK(OperandsAreValid(bytecode, 4, operand0, operand1, operand2, operand3));
+  BytecodeNode node(bytecode, operand0, operand1, operand2, operand3);
+  AttachSourceInfo(&node);
+  pipeline()->Write(&node);
+}
+
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
+                                  uint32_t operand1, uint32_t operand2) {
+  DCHECK(OperandsAreValid(bytecode, 3, operand0, operand1, operand2));
+  BytecodeNode node(bytecode, operand0, operand1, operand2);
+  AttachSourceInfo(&node);
+  pipeline()->Write(&node);
+}
+
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
+                                  uint32_t operand1) {
+  DCHECK(OperandsAreValid(bytecode, 2, operand0, operand1));
+  BytecodeNode node(bytecode, operand0, operand1);
+  AttachSourceInfo(&node);
+  pipeline()->Write(&node);
+}
+
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0) {
+  DCHECK(OperandsAreValid(bytecode, 1, operand0));
+  BytecodeNode node(bytecode, operand0);
+  AttachSourceInfo(&node);
+  pipeline()->Write(&node);
+}
+
 void BytecodeArrayBuilder::Output(Bytecode bytecode) {
-  // Don't output dead code.
-  if (exit_seen_in_block_) return;
-
+  DCHECK(OperandsAreValid(bytecode, 0));
   BytecodeNode node(bytecode);
   AttachSourceInfo(&node);
   pipeline()->Write(&node);
 }
 
-void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
-                                        OperandScale operand_scale,
-                                        uint32_t operand0, uint32_t operand1,
-                                        uint32_t operand2, uint32_t operand3) {
-  // Don't output dead code.
-  if (exit_seen_in_block_) return;
-  DCHECK(OperandIsValid(bytecode, operand_scale, 0, operand0));
-  DCHECK(OperandIsValid(bytecode, operand_scale, 1, operand1));
-  DCHECK(OperandIsValid(bytecode, operand_scale, 2, operand2));
-  DCHECK(OperandIsValid(bytecode, operand_scale, 3, operand3));
-  BytecodeNode node(bytecode, operand0, operand1, operand2, operand3,
-                    operand_scale);
-  AttachSourceInfo(&node);
-  pipeline()->Write(&node);
-}
-
-void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
-                                        OperandScale operand_scale,
-                                        uint32_t operand0, uint32_t operand1,
-                                        uint32_t operand2) {
-  // Don't output dead code.
-  if (exit_seen_in_block_) return;
-  DCHECK(OperandIsValid(bytecode, operand_scale, 0, operand0));
-  DCHECK(OperandIsValid(bytecode, operand_scale, 1, operand1));
-  DCHECK(OperandIsValid(bytecode, operand_scale, 2, operand2));
-  BytecodeNode node(bytecode, operand0, operand1, operand2, operand_scale);
-  AttachSourceInfo(&node);
-  pipeline()->Write(&node);
-}
-
-void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
-                                        OperandScale operand_scale,
-                                        uint32_t operand0, uint32_t operand1) {
-  // Don't output dead code.
-  if (exit_seen_in_block_) return;
-  DCHECK(OperandIsValid(bytecode, operand_scale, 0, operand0));
-  DCHECK(OperandIsValid(bytecode, operand_scale, 1, operand1));
-  BytecodeNode node(bytecode, operand0, operand1, operand_scale);
-  AttachSourceInfo(&node);
-  pipeline()->Write(&node);
-}
-
-void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
-                                        OperandScale operand_scale,
-                                        uint32_t operand0) {
-  // Don't output dead code.
-  if (exit_seen_in_block_) return;
-  DCHECK(OperandIsValid(bytecode, operand_scale, 0, operand0));
-  BytecodeNode node(bytecode, operand0, operand_scale);
-  AttachSourceInfo(&node);
-  pipeline()->Write(&node);
-}
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
                                                             Register reg) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(reg.SizeOfOperand());
-  OutputScaled(BytecodeForBinaryOperation(op), operand_scale,
-               RegisterOperand(reg));
+  Output(BytecodeForBinaryOperation(op), RegisterOperand(reg));
   return *this;
 }
 
@@ -186,7 +160,6 @@
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LogicalNot() {
   Output(Bytecode::kToBooleanLogicalNot);
   return *this;
@@ -200,62 +173,47 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(Token::Value op,
                                                              Register reg) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(reg.SizeOfOperand());
-  OutputScaled(BytecodeForCompareOperation(op), operand_scale,
-               RegisterOperand(reg));
+  Output(BytecodeForCompareOperation(op), RegisterOperand(reg));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
     v8::internal::Smi* smi) {
   int32_t raw_smi = smi->value();
   if (raw_smi == 0) {
     Output(Bytecode::kLdaZero);
   } else {
-    OperandSize operand_size = Bytecodes::SizeForSignedOperand(raw_smi);
-    OperandScale operand_scale = Bytecodes::OperandSizesToScale(operand_size);
-    OutputScaled(Bytecode::kLdaSmi, operand_scale,
-                 SignedOperand(raw_smi, operand_size));
+    Output(Bytecode::kLdaSmi, SignedOperand(raw_smi));
   }
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(Handle<Object> object) {
   size_t entry = GetConstantPoolEntry(object);
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(Bytecodes::SizeForUnsignedOperand(entry));
-  OutputScaled(Bytecode::kLdaConstant, operand_scale, UnsignedOperand(entry));
+  Output(Bytecode::kLdaConstant, UnsignedOperand(entry));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadUndefined() {
   Output(Bytecode::kLdaUndefined);
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNull() {
   Output(Bytecode::kLdaNull);
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadTheHole() {
   Output(Bytecode::kLdaTheHole);
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadTrue() {
   Output(Bytecode::kLdaTrue);
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadFalse() {
   Output(Bytecode::kLdaFalse);
   return *this;
@@ -263,43 +221,29 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
     Register reg) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(reg.SizeOfOperand());
-  OutputScaled(Bytecode::kLdar, operand_scale, RegisterOperand(reg));
+  Output(Bytecode::kLdar, RegisterOperand(reg));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
     Register reg) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(reg.SizeOfOperand());
-  OutputScaled(Bytecode::kStar, operand_scale, RegisterOperand(reg));
+  Output(Bytecode::kStar, RegisterOperand(reg));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
                                                          Register to) {
   DCHECK(from != to);
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(from.SizeOfOperand(), to.SizeOfOperand());
-  OutputScaled(Bytecode::kMov, operand_scale, RegisterOperand(from),
-               RegisterOperand(to));
+  Output(Bytecode::kMov, RegisterOperand(from), RegisterOperand(to));
   return *this;
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(
-    const Handle<String> name, int feedback_slot, TypeofMode typeof_mode) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(int feedback_slot,
+                                                       TypeofMode typeof_mode) {
   // TODO(rmcilroy): Potentially store typeof information in an
   // operand rather than having extra bytecodes.
   Bytecode bytecode = BytecodeForLoadGlobal(typeof_mode);
-  size_t name_index = GetConstantPoolEntry(name);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      Bytecodes::SizeForUnsignedOperand(name_index),
-      Bytecodes::SizeForUnsignedOperand(feedback_slot));
-  OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index),
-               UnsignedOperand(feedback_slot));
+  Output(bytecode, UnsignedOperand(feedback_slot));
   return *this;
 }
 
@@ -307,31 +251,21 @@
     const Handle<String> name, int feedback_slot, LanguageMode language_mode) {
   Bytecode bytecode = BytecodeForStoreGlobal(language_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      Bytecodes::SizeForUnsignedOperand(name_index),
-      Bytecodes::SizeForUnsignedOperand(feedback_slot));
-  OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index),
-               UnsignedOperand(feedback_slot));
+  Output(bytecode, UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
                                                             int slot_index) {
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      context.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(slot_index));
-  OutputScaled(Bytecode::kLdaContextSlot, operand_scale,
-               RegisterOperand(context), UnsignedOperand(slot_index));
+  Output(Bytecode::kLdaContextSlot, RegisterOperand(context),
+         UnsignedOperand(slot_index));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
                                                              int slot_index) {
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      context.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(slot_index));
-  OutputScaled(Bytecode::kStaContextSlot, operand_scale,
-               RegisterOperand(context), UnsignedOperand(slot_index));
+  Output(Bytecode::kStaContextSlot, RegisterOperand(context),
+         UnsignedOperand(slot_index));
   return *this;
 }
 
@@ -341,9 +275,7 @@
                           ? Bytecode::kLdaLookupSlotInsideTypeof
                           : Bytecode::kLdaLookupSlot;
   size_t name_index = GetConstantPoolEntry(name);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      Bytecodes::SizeForUnsignedOperand(name_index));
-  OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index));
+  Output(bytecode, UnsignedOperand(name_index));
   return *this;
 }
 
@@ -351,70 +283,52 @@
     const Handle<String> name, LanguageMode language_mode) {
   Bytecode bytecode = BytecodeForStoreLookupSlot(language_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      Bytecodes::SizeForUnsignedOperand(name_index));
-  OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index));
+  Output(bytecode, UnsignedOperand(name_index));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
     Register object, const Handle<Name> name, int feedback_slot) {
   size_t name_index = GetConstantPoolEntry(name);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      object.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(name_index),
-      Bytecodes::SizeForUnsignedOperand(feedback_slot));
-  OutputScaled(Bytecode::kLoadIC, operand_scale, RegisterOperand(object),
-               UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+  Output(Bytecode::kLdaNamedProperty, RegisterOperand(object),
+         UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
     Register object, int feedback_slot) {
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      object.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(feedback_slot));
-  OutputScaled(Bytecode::kKeyedLoadIC, operand_scale, RegisterOperand(object),
-               UnsignedOperand(feedback_slot));
+  Output(Bytecode::kLdaKeyedProperty, RegisterOperand(object),
+         UnsignedOperand(feedback_slot));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
     Register object, const Handle<Name> name, int feedback_slot,
     LanguageMode language_mode) {
-  Bytecode bytecode = BytecodeForStoreIC(language_mode);
+  Bytecode bytecode = BytecodeForStoreNamedProperty(language_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      object.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(name_index),
-      Bytecodes::SizeForUnsignedOperand(feedback_slot));
-  OutputScaled(bytecode, operand_scale, RegisterOperand(object),
-               UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+  Output(bytecode, RegisterOperand(object), UnsignedOperand(name_index),
+         UnsignedOperand(feedback_slot));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
     Register object, Register key, int feedback_slot,
     LanguageMode language_mode) {
-  Bytecode bytecode = BytecodeForKeyedStoreIC(language_mode);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      object.SizeOfOperand(), key.SizeOfOperand(),
-      Bytecodes::SizeForUnsignedOperand(feedback_slot));
-  OutputScaled(bytecode, operand_scale, RegisterOperand(object),
-               RegisterOperand(key), UnsignedOperand(feedback_slot));
+  Bytecode bytecode = BytecodeForStoreKeyedProperty(language_mode);
+  Output(bytecode, RegisterOperand(object), RegisterOperand(key),
+         UnsignedOperand(feedback_slot));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(
     Handle<SharedFunctionInfo> shared_info, PretenureFlag tenured) {
   size_t entry = GetConstantPoolEntry(shared_info);
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(Bytecodes::SizeForUnsignedOperand(entry));
-  OutputScaled(Bytecode::kCreateClosure, operand_scale, UnsignedOperand(entry),
-               UnsignedOperand(static_cast<size_t>(tenured)));
+  Output(Bytecode::kCreateClosure, UnsignedOperand(entry),
+         UnsignedOperand(static_cast<size_t>(tenured)));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArguments(
     CreateArgumentsType type) {
   // TODO(rmcilroy): Consider passing the type as a bytecode operand rather
@@ -425,71 +339,47 @@
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
     Handle<String> pattern, int literal_index, int flags) {
   size_t pattern_entry = GetConstantPoolEntry(pattern);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      Bytecodes::SizeForUnsignedOperand(pattern_entry),
-      Bytecodes::SizeForUnsignedOperand(literal_index),
-      Bytecodes::SizeForUnsignedOperand(flags));
-  OutputScaled(Bytecode::kCreateRegExpLiteral, operand_scale,
-               UnsignedOperand(pattern_entry), UnsignedOperand(literal_index),
-               UnsignedOperand(flags));
+  Output(Bytecode::kCreateRegExpLiteral, UnsignedOperand(pattern_entry),
+         UnsignedOperand(literal_index), UnsignedOperand(flags));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
     Handle<FixedArray> constant_elements, int literal_index, int flags) {
   size_t constant_elements_entry = GetConstantPoolEntry(constant_elements);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      Bytecodes::SizeForUnsignedOperand(constant_elements_entry),
-      Bytecodes::SizeForUnsignedOperand(literal_index),
-      Bytecodes::SizeForUnsignedOperand(flags));
-  OutputScaled(Bytecode::kCreateArrayLiteral, operand_scale,
-               UnsignedOperand(constant_elements_entry),
-               UnsignedOperand(literal_index), UnsignedOperand(flags));
+  Output(Bytecode::kCreateArrayLiteral,
+         UnsignedOperand(constant_elements_entry),
+         UnsignedOperand(literal_index), UnsignedOperand(flags));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
     Handle<FixedArray> constant_properties, int literal_index, int flags) {
   size_t constant_properties_entry = GetConstantPoolEntry(constant_properties);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      Bytecodes::SizeForUnsignedOperand(constant_properties_entry),
-      Bytecodes::SizeForUnsignedOperand(literal_index),
-      Bytecodes::SizeForUnsignedOperand(flags));
-  OutputScaled(Bytecode::kCreateObjectLiteral, operand_scale,
-               UnsignedOperand(constant_properties_entry),
-               UnsignedOperand(literal_index), UnsignedOperand(flags));
+  Output(Bytecode::kCreateObjectLiteral,
+         UnsignedOperand(constant_properties_entry),
+         UnsignedOperand(literal_index), UnsignedOperand(flags));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::PushContext(Register context) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(context.SizeOfOperand());
-  OutputScaled(Bytecode::kPushContext, operand_scale, RegisterOperand(context));
+  Output(Bytecode::kPushContext, RegisterOperand(context));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(context.SizeOfOperand());
-  OutputScaled(Bytecode::kPopContext, operand_scale, RegisterOperand(context));
+  Output(Bytecode::kPopContext, RegisterOperand(context));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToJSObject() {
   Output(Bytecode::kToObject);
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToName() {
   Output(Bytecode::kToName);
   return *this;
@@ -500,207 +390,24 @@
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeLabel* label) {
-  size_t current_offset = pipeline()->FlushForOffset();
-  if (label->is_forward_target()) {
-    // An earlier jump instruction refers to this label. Update it's location.
-    PatchJump(current_offset, label->offset());
-    // Now treat as if the label will only be back referred to.
-  }
-  label->bind_to(current_offset);
+  pipeline_->BindLabel(label);
   LeaveBasicBlock();
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(const BytecodeLabel& target,
                                                  BytecodeLabel* label) {
-  DCHECK(!label->is_bound());
-  DCHECK(target.is_bound());
-  // There is no need to flush the pipeline here, it will have been
-  // flushed when |target| was bound.
-  if (label->is_forward_target()) {
-    // An earlier jump instruction refers to this label. Update it's location.
-    PatchJump(target.offset(), label->offset());
-    // Now treat as if the label will only be back referred to.
-  }
-  label->bind_to(target.offset());
+  pipeline_->BindLabel(target, label);
   LeaveBasicBlock();
   return *this;
 }
 
-
-// static
-Bytecode BytecodeArrayBuilder::GetJumpWithConstantOperand(
-    Bytecode jump_bytecode) {
-  switch (jump_bytecode) {
-    case Bytecode::kJump:
-      return Bytecode::kJumpConstant;
-    case Bytecode::kJumpIfTrue:
-      return Bytecode::kJumpIfTrueConstant;
-    case Bytecode::kJumpIfFalse:
-      return Bytecode::kJumpIfFalseConstant;
-    case Bytecode::kJumpIfToBooleanTrue:
-      return Bytecode::kJumpIfToBooleanTrueConstant;
-    case Bytecode::kJumpIfToBooleanFalse:
-      return Bytecode::kJumpIfToBooleanFalseConstant;
-    case Bytecode::kJumpIfNotHole:
-      return Bytecode::kJumpIfNotHoleConstant;
-    case Bytecode::kJumpIfNull:
-      return Bytecode::kJumpIfNullConstant;
-    case Bytecode::kJumpIfUndefined:
-      return Bytecode::kJumpIfUndefinedConstant;
-    default:
-      UNREACHABLE();
-      return Bytecode::kIllegal;
-  }
-}
-
-void BytecodeArrayBuilder::PatchJumpWith8BitOperand(
-    ZoneVector<uint8_t>* bytecodes, size_t jump_location, int delta) {
-  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes->at(jump_location));
-  DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
-  size_t operand_location = jump_location + 1;
-  DCHECK_EQ(bytecodes->at(operand_location), 0);
-  if (Bytecodes::SizeForSignedOperand(delta) == OperandSize::kByte) {
-    // The jump fits within the range of an Imm operand, so cancel
-    // the reservation and jump directly.
-    constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
-    bytecodes->at(operand_location) = static_cast<uint8_t>(delta);
-  } else {
-    // The jump does not fit within the range of an Imm operand, so
-    // commit reservation putting the offset into the constant pool,
-    // and update the jump instruction and operand.
-    size_t entry = constant_array_builder()->CommitReservedEntry(
-        OperandSize::kByte, handle(Smi::FromInt(delta), isolate()));
-    DCHECK(Bytecodes::SizeForUnsignedOperand(entry) == OperandSize::kByte);
-    jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
-    bytecodes->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
-    bytecodes->at(operand_location) = static_cast<uint8_t>(entry);
-  }
-}
-
-void BytecodeArrayBuilder::PatchJumpWith16BitOperand(
-    ZoneVector<uint8_t>* bytecodes, size_t jump_location, int delta) {
-  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes->at(jump_location));
-  DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
-  size_t operand_location = jump_location + 1;
-  uint8_t operand_bytes[2];
-  if (Bytecodes::SizeForSignedOperand(delta) <= OperandSize::kShort) {
-    constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
-    WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(delta));
-  } else {
-    jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
-    bytecodes->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
-    size_t entry = constant_array_builder()->CommitReservedEntry(
-        OperandSize::kShort, handle(Smi::FromInt(delta), isolate()));
-    WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
-  }
-  DCHECK(bytecodes->at(operand_location) == 0 &&
-         bytecodes->at(operand_location + 1) == 0);
-  bytecodes->at(operand_location++) = operand_bytes[0];
-  bytecodes->at(operand_location) = operand_bytes[1];
-}
-
-void BytecodeArrayBuilder::PatchJumpWith32BitOperand(
-    ZoneVector<uint8_t>* bytecodes, size_t jump_location, int delta) {
-  DCHECK(Bytecodes::IsJumpImmediate(
-      Bytecodes::FromByte(bytecodes->at(jump_location))));
-  constant_array_builder()->DiscardReservedEntry(OperandSize::kQuad);
-  uint8_t operand_bytes[4];
-  WriteUnalignedUInt32(operand_bytes, static_cast<uint32_t>(delta));
-  size_t operand_location = jump_location + 1;
-  DCHECK(bytecodes->at(operand_location) == 0 &&
-         bytecodes->at(operand_location + 1) == 0 &&
-         bytecodes->at(operand_location + 2) == 0 &&
-         bytecodes->at(operand_location + 3) == 0);
-  bytecodes->at(operand_location++) = operand_bytes[0];
-  bytecodes->at(operand_location++) = operand_bytes[1];
-  bytecodes->at(operand_location++) = operand_bytes[2];
-  bytecodes->at(operand_location) = operand_bytes[3];
-}
-
-void BytecodeArrayBuilder::PatchJump(size_t jump_target, size_t jump_location) {
-  ZoneVector<uint8_t>* bytecodes = bytecode_array_writer()->bytecodes();
-  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes->at(jump_location));
-  int delta = static_cast<int>(jump_target - jump_location);
-  int prefix_offset = 0;
-  OperandScale operand_scale = OperandScale::kSingle;
-  if (Bytecodes::IsPrefixScalingBytecode(jump_bytecode)) {
-    // If a prefix scaling bytecode is emitted the target offset is one
-    // less than the case of no prefix scaling bytecode.
-    delta -= 1;
-    prefix_offset = 1;
-    operand_scale = Bytecodes::PrefixBytecodeToOperandScale(jump_bytecode);
-    jump_bytecode =
-        Bytecodes::FromByte(bytecodes->at(jump_location + prefix_offset));
-  }
-
-  DCHECK(Bytecodes::IsJump(jump_bytecode));
-  switch (operand_scale) {
-    case OperandScale::kSingle:
-      PatchJumpWith8BitOperand(bytecodes, jump_location, delta);
-      break;
-    case OperandScale::kDouble:
-      PatchJumpWith16BitOperand(bytecodes, jump_location + prefix_offset,
-                                delta);
-      break;
-    case OperandScale::kQuadruple:
-      PatchJumpWith32BitOperand(bytecodes, jump_location + prefix_offset,
-                                delta);
-      break;
-    default:
-      UNREACHABLE();
-  }
-  unbound_jumps_--;
-}
-
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
                                                        BytecodeLabel* label) {
-  // Don't emit dead code.
-  if (exit_seen_in_block_) return *this;
-
-  if (label->is_bound()) {
-    // Label has been bound already so this is a backwards jump.
-    size_t current_offset = pipeline()->FlushForOffset();
-    CHECK_GE(current_offset, label->offset());
-    CHECK_LE(current_offset, static_cast<size_t>(kMaxInt));
-    size_t abs_delta = current_offset - label->offset();
-    int delta = -static_cast<int>(abs_delta);
-    OperandSize operand_size = Bytecodes::SizeForSignedOperand(delta);
-    if (operand_size > OperandSize::kByte) {
-      // Adjust for scaling byte prefix for wide jump offset.
-      DCHECK_LE(delta, 0);
-      delta -= 1;
-    }
-    OutputScaled(jump_bytecode, Bytecodes::OperandSizesToScale(operand_size),
-                 SignedOperand(delta, operand_size));
-  } else {
-    // The label has not yet been bound so this is a forward reference
-    // that will be patched when the label is bound. We create a
-    // reservation in the constant pool so the jump can be patched
-    // when the label is bound. The reservation means the maximum size
-    // of the operand for the constant is known and the jump can
-    // be emitted into the bytecode stream with space for the operand.
-    unbound_jumps_++;
-    OperandSize reserved_operand_size =
-        constant_array_builder()->CreateReservedEntry();
-    OutputScaled(jump_bytecode,
-                 Bytecodes::OperandSizesToScale(reserved_operand_size), 0);
-
-    // Calculate the label position by flushing for offset after emitting the
-    // jump bytecode.
-    size_t offset = pipeline()->FlushForOffset();
-    OperandScale operand_scale =
-        Bytecodes::OperandSizesToScale(reserved_operand_size);
-    offset -= Bytecodes::Size(jump_bytecode, operand_scale);
-    if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale)) {
-      offset -= 1;
-    }
-    label->set_referrer(offset);
-  }
+  BytecodeNode node(jump_bytecode, 0);
+  AttachSourceInfo(&node);
+  pipeline_->WriteJump(&node, label);
   LeaveBasicBlock();
   return *this;
 }
@@ -730,39 +437,43 @@
   return OutputJump(Bytecode::kJumpIfUndefined, label);
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck(int position) {
-  if (position != RelocInfo::kNoPosition) {
-    // We need to attach a non-breakable source position to a stack check,
-    // so we simply add it as expression position.
-    latest_source_info_.Update({position, false});
-  }
-  Output(Bytecode::kStackCheck);
-  return *this;
-}
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNotHole(
     BytecodeLabel* label) {
   return OutputJump(Bytecode::kJumpIfNotHole, label);
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
-  Output(Bytecode::kThrow);
-  exit_seen_in_block_ = true;
+BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck(int position) {
+  if (position != RelocInfo::kNoPosition) {
+    // We need to attach a non-breakable source position to a stack
+    // check, so we simply add it as expression position. There can be
+    // a prior statement position from constructs like:
+    //
+    //    do var x;  while (false);
+    //
+    // A Nop could be inserted for empty statements, but since no code
+    // is associated with these positions, instead we force the stack
+    // check's expression position which eliminates the empty
+    // statement's position.
+    latest_source_info_.ForceExpressionPosition(position);
+  }
+  Output(Bytecode::kStackCheck);
   return *this;
 }
 
+BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
+  Output(Bytecode::kThrow);
+  return *this;
+}
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ReThrow() {
   Output(Bytecode::kReThrow);
-  exit_seen_in_block_ = true;
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
   SetReturnPosition();
   Output(Bytecode::kReturn);
-  exit_seen_in_block_ = true;
+  return_seen_in_block_ = true;
   return *this;
 }
 
@@ -773,100 +484,74 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
     Register cache_info_triple) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(cache_info_triple.SizeOfOperand());
-  OutputScaled(Bytecode::kForInPrepare, operand_scale,
-               RegisterOperand(cache_info_triple));
+  Output(Bytecode::kForInPrepare, RegisterOperand(cache_info_triple));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInDone(Register index,
                                                       Register cache_length) {
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      index.SizeOfOperand(), cache_length.SizeOfOperand());
-  OutputScaled(Bytecode::kForInDone, operand_scale, RegisterOperand(index),
-               RegisterOperand(cache_length));
+  Output(Bytecode::kForInDone, RegisterOperand(index),
+         RegisterOperand(cache_length));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(
     Register receiver, Register index, Register cache_type_array_pair,
     int feedback_slot) {
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      receiver.SizeOfOperand(), index.SizeOfOperand(),
-      cache_type_array_pair.SizeOfOperand(),
-      Bytecodes::SizeForUnsignedOperand(feedback_slot));
-  OutputScaled(Bytecode::kForInNext, operand_scale, RegisterOperand(receiver),
-               RegisterOperand(index), RegisterOperand(cache_type_array_pair),
-               UnsignedOperand(feedback_slot));
+  Output(Bytecode::kForInNext, RegisterOperand(receiver),
+         RegisterOperand(index), RegisterOperand(cache_type_array_pair),
+         UnsignedOperand(feedback_slot));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInStep(Register index) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(index.SizeOfOperand());
-  OutputScaled(Bytecode::kForInStep, operand_scale, RegisterOperand(index));
+  Output(Bytecode::kForInStep, RegisterOperand(index));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::SuspendGenerator(
     Register generator) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(generator.SizeOfOperand());
-  OutputScaled(Bytecode::kSuspendGenerator, operand_scale,
-               RegisterOperand(generator));
+  Output(Bytecode::kSuspendGenerator, RegisterOperand(generator));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::ResumeGenerator(
     Register generator) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(generator.SizeOfOperand());
-  OutputScaled(Bytecode::kResumeGenerator, operand_scale,
-               RegisterOperand(generator));
+  Output(Bytecode::kResumeGenerator, RegisterOperand(generator));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::MarkHandler(int handler_id,
                                                         bool will_catch) {
-  size_t offset = pipeline()->FlushForOffset();
-  handler_table_builder()->SetHandlerTarget(handler_id, offset);
+  BytecodeLabel handler;
+  Bind(&handler);
+  handler_table_builder()->SetHandlerTarget(handler_id, handler.offset());
   handler_table_builder()->SetPrediction(handler_id, will_catch);
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryBegin(int handler_id,
                                                          Register context) {
-  size_t offset = pipeline()->FlushForOffset();
-  handler_table_builder()->SetTryRegionStart(handler_id, offset);
+  BytecodeLabel try_begin;
+  Bind(&try_begin);
+  handler_table_builder()->SetTryRegionStart(handler_id, try_begin.offset());
   handler_table_builder()->SetContextRegister(handler_id, context);
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryEnd(int handler_id) {
-  size_t offset = pipeline()->FlushForOffset();
-  handler_table_builder()->SetTryRegionEnd(handler_id, offset);
+  BytecodeLabel try_end;
+  Bind(&try_end);
+  handler_table_builder()->SetTryRegionEnd(handler_id, try_end.offset());
   return *this;
 }
 
-
-void BytecodeArrayBuilder::LeaveBasicBlock() {
-  exit_seen_in_block_ = false;
-  pipeline()->FlushBasicBlock();
-}
-
 void BytecodeArrayBuilder::EnsureReturn() {
-  if (!exit_seen_in_block_) {
+  if (!return_seen_in_block_) {
     LoadUndefined();
     Return();
   }
-  DCHECK(exit_seen_in_block_);
+  DCHECK(return_seen_in_block_);
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
@@ -875,14 +560,8 @@
                                                  int feedback_slot,
                                                  TailCallMode tail_call_mode) {
   Bytecode bytecode = BytecodeForCall(tail_call_mode);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      callable.SizeOfOperand(), receiver_args.SizeOfOperand(),
-      Bytecodes::SizeForUnsignedOperand(receiver_args_count),
-      Bytecodes::SizeForUnsignedOperand(feedback_slot));
-  OutputScaled(bytecode, operand_scale, RegisterOperand(callable),
-               RegisterOperand(receiver_args),
-               UnsignedOperand(receiver_args_count),
-               UnsignedOperand(feedback_slot));
+  Output(bytecode, RegisterOperand(callable), RegisterOperand(receiver_args),
+         UnsignedOperand(receiver_args_count), UnsignedOperand(feedback_slot));
   return *this;
 }
 
@@ -893,15 +572,11 @@
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
   }
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      constructor.SizeOfOperand(), first_arg.SizeOfOperand(),
-      Bytecodes::SizeForUnsignedOperand(arg_count));
-  OutputScaled(Bytecode::kNew, operand_scale, RegisterOperand(constructor),
-               RegisterOperand(first_arg), UnsignedOperand(arg_count));
+  Output(Bytecode::kNew, RegisterOperand(constructor),
+         RegisterOperand(first_arg), UnsignedOperand(arg_count));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
     Runtime::FunctionId function_id, Register first_arg, size_t arg_count) {
   DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
@@ -910,17 +585,19 @@
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
   }
-  Bytecode bytecode = IntrinsicsHelper::IsSupported(function_id)
-                          ? Bytecode::kInvokeIntrinsic
-                          : Bytecode::kCallRuntime;
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      first_arg.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(arg_count));
-  OutputScaled(bytecode, operand_scale, static_cast<uint16_t>(function_id),
-               RegisterOperand(first_arg), UnsignedOperand(arg_count));
+  Bytecode bytecode;
+  uint32_t id;
+  if (IntrinsicsHelper::IsSupported(function_id)) {
+    bytecode = Bytecode::kInvokeIntrinsic;
+    id = static_cast<uint32_t>(IntrinsicsHelper::FromRuntimeId(function_id));
+  } else {
+    bytecode = Bytecode::kCallRuntime;
+    id = static_cast<uint32_t>(function_id);
+  }
+  Output(bytecode, id, RegisterOperand(first_arg), UnsignedOperand(arg_count));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
     Runtime::FunctionId function_id, Register first_arg, size_t arg_count,
     Register first_return) {
@@ -930,34 +607,22 @@
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
   }
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      first_arg.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(arg_count),
-      first_return.SizeOfOperand());
-  OutputScaled(Bytecode::kCallRuntimeForPair, operand_scale,
-               static_cast<uint16_t>(function_id), RegisterOperand(first_arg),
-               UnsignedOperand(arg_count), RegisterOperand(first_return));
+  Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
+         RegisterOperand(first_arg), UnsignedOperand(arg_count),
+         RegisterOperand(first_return));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(
     int context_index, Register receiver_args, size_t receiver_args_count) {
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      Bytecodes::SizeForUnsignedOperand(context_index),
-      receiver_args.SizeOfOperand(),
-      Bytecodes::SizeForUnsignedOperand(receiver_args_count));
-  OutputScaled(Bytecode::kCallJSRuntime, operand_scale,
-               UnsignedOperand(context_index), RegisterOperand(receiver_args),
-               UnsignedOperand(receiver_args_count));
+  Output(Bytecode::kCallJSRuntime, UnsignedOperand(context_index),
+         RegisterOperand(receiver_args), UnsignedOperand(receiver_args_count));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
                                                    LanguageMode language_mode) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(object.SizeOfOperand());
-  OutputScaled(BytecodeForDelete(language_mode), operand_scale,
-               RegisterOperand(object));
+  Output(BytecodeForDelete(language_mode), RegisterOperand(object));
   return *this;
 }
 
@@ -967,101 +632,37 @@
 
 void BytecodeArrayBuilder::SetReturnPosition() {
   if (return_position_ == RelocInfo::kNoPosition) return;
-  if (exit_seen_in_block_) return;
-  latest_source_info_.Update({return_position_, true});
+  latest_source_info_.MakeStatementPosition(return_position_);
 }
 
 void BytecodeArrayBuilder::SetStatementPosition(Statement* stmt) {
   if (stmt->position() == RelocInfo::kNoPosition) return;
-  if (exit_seen_in_block_) return;
-  latest_source_info_.Update({stmt->position(), true});
+  latest_source_info_.MakeStatementPosition(stmt->position());
 }
 
 void BytecodeArrayBuilder::SetExpressionPosition(Expression* expr) {
   if (expr->position() == RelocInfo::kNoPosition) return;
-  if (exit_seen_in_block_) return;
-  latest_source_info_.Update({expr->position(), false});
+  if (!latest_source_info_.is_statement()) {
+    // Ensure the current expression position is overwritten with the
+    // latest value.
+    latest_source_info_.MakeExpressionPosition(expr->position());
+  }
 }
 
 void BytecodeArrayBuilder::SetExpressionAsStatementPosition(Expression* expr) {
   if (expr->position() == RelocInfo::kNoPosition) return;
-  if (exit_seen_in_block_) return;
-  latest_source_info_.Update({expr->position(), true});
+  latest_source_info_.MakeStatementPosition(expr->position());
 }
 
 bool BytecodeArrayBuilder::TemporaryRegisterIsLive(Register reg) const {
   return temporary_register_allocator()->RegisterIsLive(reg);
 }
 
-bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode,
-                                          OperandScale operand_scale,
-                                          int operand_index,
-                                          uint32_t operand_value) const {
-  OperandSize operand_size =
-      Bytecodes::GetOperandSize(bytecode, operand_index, operand_scale);
-  OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
-  switch (operand_type) {
-    case OperandType::kNone:
-      return false;
-    case OperandType::kRegCount: {
-      if (operand_index > 0) {
-        OperandType previous_operand_type =
-            Bytecodes::GetOperandType(bytecode, operand_index - 1);
-        if (previous_operand_type != OperandType::kMaybeReg &&
-            previous_operand_type != OperandType::kReg) {
-          return false;
-        }
-      }
-    }  // Fall-through
-    case OperandType::kFlag8:
-    case OperandType::kIdx:
-    case OperandType::kRuntimeId:
-    case OperandType::kImm: {
-      size_t unsigned_value = static_cast<size_t>(operand_value);
-      return Bytecodes::SizeForUnsignedOperand(unsigned_value) <= operand_size;
-    }
-    case OperandType::kMaybeReg:
-      if (RegisterFromOperand(operand_value) == Register(0)) {
-        return true;
-      }
-    // Fall-through to kReg case.
-    case OperandType::kReg:
-    case OperandType::kRegOut: {
-      Register reg = RegisterFromOperand(operand_value);
-      return RegisterIsValid(reg, operand_size);
-    }
-    case OperandType::kRegOutPair:
-    case OperandType::kRegPair: {
-      Register reg0 = RegisterFromOperand(operand_value);
-      Register reg1 = Register(reg0.index() + 1);
-      // The size of reg1 is immaterial.
-      return RegisterIsValid(reg0, operand_size) &&
-             RegisterIsValid(reg1, OperandSize::kQuad);
-    }
-    case OperandType::kRegOutTriple: {
-      Register reg0 = RegisterFromOperand(operand_value);
-      Register reg1 = Register(reg0.index() + 1);
-      Register reg2 = Register(reg0.index() + 2);
-      // The size of reg1 and reg2 is immaterial.
-      return RegisterIsValid(reg0, operand_size) &&
-             RegisterIsValid(reg1, OperandSize::kQuad) &&
-             RegisterIsValid(reg2, OperandSize::kQuad);
-    }
-  }
-  UNREACHABLE();
-  return false;
-}
-
-bool BytecodeArrayBuilder::RegisterIsValid(Register reg,
-                                           OperandSize reg_size) const {
+bool BytecodeArrayBuilder::RegisterIsValid(Register reg) const {
   if (!reg.is_valid()) {
     return false;
   }
 
-  if (reg.SizeOfOperand() > reg_size) {
-    return false;
-  }
-
   if (reg.is_current_context() || reg.is_function_closure() ||
       reg.is_new_target()) {
     return true;
@@ -1075,6 +676,90 @@
   }
 }
 
+bool BytecodeArrayBuilder::OperandsAreValid(
+    Bytecode bytecode, int operand_count, uint32_t operand0, uint32_t operand1,
+    uint32_t operand2, uint32_t operand3) const {
+  if (Bytecodes::NumberOfOperands(bytecode) != operand_count) {
+    return false;
+  }
+
+  uint32_t operands[] = {operand0, operand1, operand2, operand3};
+  const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
+  for (int i = 0; i < operand_count; ++i) {
+    switch (operand_types[i]) {
+      case OperandType::kNone:
+        return false;
+      case OperandType::kRegCount: {
+        CHECK_NE(i, 0);
+        CHECK(operand_types[i - 1] == OperandType::kMaybeReg ||
+              operand_types[i - 1] == OperandType::kReg);
+        if (i > 0 && operands[i] > 0) {
+          Register start = Register::FromOperand(operands[i - 1]);
+          Register end(start.index() + static_cast<int>(operands[i]) - 1);
+          if (!RegisterIsValid(start) || !RegisterIsValid(end) || start > end) {
+            return false;
+          }
+        }
+        break;
+      }
+      case OperandType::kFlag8:
+      case OperandType::kIntrinsicId:
+        if (Bytecodes::SizeForUnsignedOperand(operands[i]) >
+            OperandSize::kByte) {
+          return false;
+        }
+        break;
+      case OperandType::kRuntimeId:
+        if (Bytecodes::SizeForUnsignedOperand(operands[i]) >
+            OperandSize::kShort) {
+          return false;
+        }
+        break;
+      case OperandType::kIdx:
+        // TODO(oth): Consider splitting OperandType::kIdx into two
+        // operand types. One which is a constant pool index that can
+        // be checked, and the other is an unsigned value.
+        break;
+      case OperandType::kImm:
+        break;
+      case OperandType::kMaybeReg:
+        if (Register::FromOperand(operands[i]) == Register(0)) {
+          break;
+        }
+      // Fall-through to kReg case.
+      case OperandType::kReg:
+      case OperandType::kRegOut: {
+        Register reg = Register::FromOperand(operands[i]);
+        if (!RegisterIsValid(reg)) {
+          return false;
+        }
+        break;
+      }
+      case OperandType::kRegOutPair:
+      case OperandType::kRegPair: {
+        Register reg0 = Register::FromOperand(operands[i]);
+        Register reg1 = Register(reg0.index() + 1);
+        if (!RegisterIsValid(reg0) || !RegisterIsValid(reg1)) {
+          return false;
+        }
+        break;
+      }
+      case OperandType::kRegOutTriple: {
+        Register reg0 = Register::FromOperand(operands[i]);
+        Register reg1 = Register(reg0.index() + 1);
+        Register reg2 = Register(reg0.index() + 2);
+        if (!RegisterIsValid(reg0) || !RegisterIsValid(reg1) ||
+            !RegisterIsValid(reg2)) {
+          return false;
+        }
+        break;
+      }
+    }
+  }
+
+  return true;
+}
+
 // static
 Bytecode BytecodeArrayBuilder::BytecodeForBinaryOperation(Token::Value op) {
   switch (op) {
@@ -1106,7 +791,6 @@
   }
 }
 
-
 // static
 Bytecode BytecodeArrayBuilder::BytecodeForCountOperation(Token::Value op) {
   switch (op) {
@@ -1120,7 +804,6 @@
   }
 }
 
-
 // static
 Bytecode BytecodeArrayBuilder::BytecodeForCompareOperation(Token::Value op) {
   switch (op) {
@@ -1148,35 +831,33 @@
   }
 }
 
-
 // static
-Bytecode BytecodeArrayBuilder::BytecodeForStoreIC(LanguageMode language_mode) {
-  switch (language_mode) {
-    case SLOPPY:
-      return Bytecode::kStoreICSloppy;
-    case STRICT:
-      return Bytecode::kStoreICStrict;
-    default:
-      UNREACHABLE();
-  }
-  return Bytecode::kIllegal;
-}
-
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForKeyedStoreIC(
+Bytecode BytecodeArrayBuilder::BytecodeForStoreNamedProperty(
     LanguageMode language_mode) {
   switch (language_mode) {
     case SLOPPY:
-      return Bytecode::kKeyedStoreICSloppy;
+      return Bytecode::kStaNamedPropertySloppy;
     case STRICT:
-      return Bytecode::kKeyedStoreICStrict;
+      return Bytecode::kStaNamedPropertyStrict;
     default:
       UNREACHABLE();
   }
   return Bytecode::kIllegal;
 }
 
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForStoreKeyedProperty(
+    LanguageMode language_mode) {
+  switch (language_mode) {
+    case SLOPPY:
+      return Bytecode::kStaKeyedPropertySloppy;
+    case STRICT:
+      return Bytecode::kStaKeyedPropertyStrict;
+    default:
+      UNREACHABLE();
+  }
+  return Bytecode::kIllegal;
+}
 
 // static
 Bytecode BytecodeArrayBuilder::BytecodeForLoadGlobal(TypeofMode typeof_mode) {
@@ -1184,7 +865,6 @@
                                       : Bytecode::kLdaGlobal;
 }
 
-
 // static
 Bytecode BytecodeArrayBuilder::BytecodeForStoreGlobal(
     LanguageMode language_mode) {
@@ -1199,7 +879,6 @@
   return Bytecode::kIllegal;
 }
 
-
 // static
 Bytecode BytecodeArrayBuilder::BytecodeForStoreLookupSlot(
     LanguageMode language_mode) {
@@ -1229,7 +908,6 @@
   return Bytecode::kIllegal;
 }
 
-
 // static
 Bytecode BytecodeArrayBuilder::BytecodeForDelete(LanguageMode language_mode) {
   switch (language_mode) {
@@ -1256,38 +934,6 @@
   return Bytecode::kIllegal;
 }
 
-uint32_t BytecodeArrayBuilder::RegisterOperand(Register reg) {
-  return static_cast<uint32_t>(reg.ToOperand());
-}
-
-Register BytecodeArrayBuilder::RegisterFromOperand(uint32_t operand) {
-  return Register::FromOperand(static_cast<int32_t>(operand));
-}
-
-uint32_t BytecodeArrayBuilder::SignedOperand(int value, OperandSize size) {
-  switch (size) {
-    case OperandSize::kByte:
-      return static_cast<uint8_t>(value & 0xff);
-    case OperandSize::kShort:
-      return static_cast<uint16_t>(value & 0xffff);
-    case OperandSize::kQuad:
-      return static_cast<uint32_t>(value);
-    case OperandSize::kNone:
-      UNREACHABLE();
-  }
-  return 0;
-}
-
-uint32_t BytecodeArrayBuilder::UnsignedOperand(int value) {
-  DCHECK_GE(value, 0);
-  return static_cast<uint32_t>(value);
-}
-
-uint32_t BytecodeArrayBuilder::UnsignedOperand(size_t value) {
-  DCHECK_LE(value, kMaxUInt32);
-  return static_cast<uint32_t>(value);
-}
-
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/bytecode-array-builder.h b/src/interpreter/bytecode-array-builder.h
index 3930a06..8a10973 100644
--- a/src/interpreter/bytecode-array-builder.h
+++ b/src/interpreter/bytecode-array-builder.h
@@ -11,7 +11,6 @@
 #include "src/interpreter/bytecodes.h"
 #include "src/interpreter/constant-array-builder.h"
 #include "src/interpreter/handler-table-builder.h"
-#include "src/interpreter/source-position-table.h"
 #include "src/zone-containers.h"
 
 namespace v8 {
@@ -86,8 +85,7 @@
   BytecodeArrayBuilder& LoadFalse();
 
   // Global loads to the accumulator and stores from the accumulator.
-  BytecodeArrayBuilder& LoadGlobal(const Handle<String> name, int feedback_slot,
-                                   TypeofMode typeof_mode);
+  BytecodeArrayBuilder& LoadGlobal(int feedback_slot, TypeofMode typeof_mode);
   BytecodeArrayBuilder& StoreGlobal(const Handle<String> name,
                                     int feedback_slot,
                                     LanguageMode language_mode);
@@ -273,11 +271,23 @@
 
   void EnsureReturn();
 
-  static uint32_t RegisterOperand(Register reg);
-  static Register RegisterFromOperand(uint32_t operand);
-  static uint32_t SignedOperand(int value, OperandSize size);
-  static uint32_t UnsignedOperand(int value);
-  static uint32_t UnsignedOperand(size_t value);
+  static uint32_t RegisterOperand(Register reg) {
+    return static_cast<uint32_t>(reg.ToOperand());
+  }
+
+  static uint32_t SignedOperand(int value) {
+    return static_cast<uint32_t>(value);
+  }
+
+  static uint32_t UnsignedOperand(int value) {
+    DCHECK_GE(value, 0);
+    return static_cast<uint32_t>(value);
+  }
+
+  static uint32_t UnsignedOperand(size_t value) {
+    DCHECK_LE(value, kMaxUInt32);
+    return static_cast<uint32_t>(value);
+  }
 
  private:
   friend class BytecodeRegisterAllocator;
@@ -285,8 +295,8 @@
   static Bytecode BytecodeForBinaryOperation(Token::Value op);
   static Bytecode BytecodeForCountOperation(Token::Value op);
   static Bytecode BytecodeForCompareOperation(Token::Value op);
-  static Bytecode BytecodeForStoreIC(LanguageMode language_mode);
-  static Bytecode BytecodeForKeyedStoreIC(LanguageMode language_mode);
+  static Bytecode BytecodeForStoreNamedProperty(LanguageMode language_mode);
+  static Bytecode BytecodeForStoreKeyedProperty(LanguageMode language_mode);
   static Bytecode BytecodeForLoadGlobal(TypeofMode typeof_mode);
   static Bytecode BytecodeForStoreGlobal(LanguageMode language_mode);
   static Bytecode BytecodeForStoreLookupSlot(LanguageMode language_mode);
@@ -294,34 +304,21 @@
   static Bytecode BytecodeForDelete(LanguageMode language_mode);
   static Bytecode BytecodeForCall(TailCallMode tail_call_mode);
 
-  static Bytecode GetJumpWithConstantOperand(Bytecode jump_smi8_operand);
-
+  void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+              uint32_t operand2, uint32_t operand3);
+  void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+              uint32_t operand2);
+  void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1);
+  void Output(Bytecode bytecode, uint32_t operand0);
   void Output(Bytecode bytecode);
-  void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
-                    uint32_t operand0, uint32_t operand1, uint32_t operand2,
-                    uint32_t operand3);
-  void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
-                    uint32_t operand0, uint32_t operand1, uint32_t operand2);
-  void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
-                    uint32_t operand0, uint32_t operand1);
-  void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
-                    uint32_t operand0);
 
   BytecodeArrayBuilder& OutputJump(Bytecode jump_bytecode,
                                    BytecodeLabel* label);
-  void PatchJump(size_t jump_target, size_t jump_location);
-  void PatchJumpWith8BitOperand(ZoneVector<uint8_t>* bytecodes,
-                                size_t jump_location, int delta);
-  void PatchJumpWith16BitOperand(ZoneVector<uint8_t>* bytecodes,
-                                 size_t jump_location, int delta);
-  void PatchJumpWith32BitOperand(ZoneVector<uint8_t>* bytecodes,
-                                 size_t jump_location, int delta);
 
-  void LeaveBasicBlock();
-
-  bool OperandIsValid(Bytecode bytecode, OperandScale operand_scale,
-                      int operand_index, uint32_t operand_value) const;
-  bool RegisterIsValid(Register reg, OperandSize reg_size) const;
+  bool RegisterIsValid(Register reg) const;
+  bool OperandsAreValid(Bytecode bytecode, int operand_count,
+                        uint32_t operand0 = 0, uint32_t operand1 = 0,
+                        uint32_t operand2 = 0, uint32_t operand3 = 0) const;
 
   // Attach latest source position to |node|.
   void AttachSourceInfo(BytecodeNode* node);
@@ -337,6 +334,8 @@
   // during bytecode generation.
   BytecodeArrayBuilder& Illegal();
 
+  void LeaveBasicBlock() { return_seen_in_block_ = false; }
+
   Isolate* isolate() const { return isolate_; }
   BytecodeArrayWriter* bytecode_array_writer() {
     return &bytecode_array_writer_;
@@ -351,18 +350,13 @@
   HandlerTableBuilder* handler_table_builder() {
     return &handler_table_builder_;
   }
-  SourcePositionTableBuilder* source_position_table_builder() {
-    return &source_position_table_builder_;
-  }
 
   Isolate* isolate_;
   Zone* zone_;
   bool bytecode_generated_;
   ConstantArrayBuilder constant_array_builder_;
   HandlerTableBuilder handler_table_builder_;
-  SourcePositionTableBuilder source_position_table_builder_;
-  bool exit_seen_in_block_;
-  int unbound_jumps_;
+  bool return_seen_in_block_;
   int parameter_count_;
   int local_register_count_;
   int context_register_count_;
@@ -375,47 +369,6 @@
   DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
 };
 
-
-// A label representing a branch target in a bytecode array. When a
-// label is bound, it represents a known position in the bytecode
-// array. For labels that are forward references there can be at most
-// one reference whilst it is unbound.
-class BytecodeLabel final {
- public:
-  BytecodeLabel() : bound_(false), offset_(kInvalidOffset) {}
-
-  bool is_bound() const { return bound_; }
-  size_t offset() const { return offset_; }
-
- private:
-  static const size_t kInvalidOffset = static_cast<size_t>(-1);
-
-  void bind_to(size_t offset) {
-    DCHECK(!bound_ && offset != kInvalidOffset);
-    offset_ = offset;
-    bound_ = true;
-  }
-
-  void set_referrer(size_t offset) {
-    DCHECK(!bound_ && offset != kInvalidOffset && offset_ == kInvalidOffset);
-    offset_ = offset;
-  }
-
-  bool is_forward_target() const {
-    return offset() != kInvalidOffset && !is_bound();
-  }
-
-  // There are three states for a label:
-  //                    bound_   offset_
-  //  UNSET             false    kInvalidOffset
-  //  FORWARD_TARGET    false    Offset of referring jump
-  //  BACKWARD_TARGET    true    Offset of label in bytecode array when bound
-  bool bound_;
-  size_t offset_;
-
-  friend class BytecodeArrayBuilder;
-};
-
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/bytecode-array-iterator.cc b/src/interpreter/bytecode-array-iterator.cc
index 319d2a0..a4a8f79 100644
--- a/src/interpreter/bytecode-array-iterator.cc
+++ b/src/interpreter/bytecode-array-iterator.cc
@@ -4,6 +4,7 @@
 
 #include "src/interpreter/bytecode-array-iterator.h"
 
+#include "src/interpreter/interpreter-intrinsics.h"
 #include "src/objects-inl.h"
 
 namespace v8 {
@@ -140,11 +141,23 @@
   }
 }
 
-uint32_t BytecodeArrayIterator::GetRuntimeIdOperand(int operand_index) const {
+Runtime::FunctionId BytecodeArrayIterator::GetRuntimeIdOperand(
+    int operand_index) const {
   OperandType operand_type =
       Bytecodes::GetOperandType(current_bytecode(), operand_index);
   DCHECK(operand_type == OperandType::kRuntimeId);
-  return GetUnsignedOperand(operand_index, operand_type);
+  uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
+  return static_cast<Runtime::FunctionId>(raw_id);
+}
+
+Runtime::FunctionId BytecodeArrayIterator::GetIntrinsicIdOperand(
+    int operand_index) const {
+  OperandType operand_type =
+      Bytecodes::GetOperandType(current_bytecode(), operand_index);
+  DCHECK(operand_type == OperandType::kIntrinsicId);
+  uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
+  return IntrinsicsHelper::ToRuntimeId(
+      static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
 }
 
 Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
diff --git a/src/interpreter/bytecode-array-iterator.h b/src/interpreter/bytecode-array-iterator.h
index b372894..90001ef 100644
--- a/src/interpreter/bytecode-array-iterator.h
+++ b/src/interpreter/bytecode-array-iterator.h
@@ -8,6 +8,7 @@
 #include "src/handles.h"
 #include "src/interpreter/bytecodes.h"
 #include "src/objects.h"
+#include "src/runtime/runtime.h"
 
 namespace v8 {
 namespace internal {
@@ -34,7 +35,8 @@
   uint32_t GetRegisterCountOperand(int operand_index) const;
   Register GetRegisterOperand(int operand_index) const;
   int GetRegisterOperandRange(int operand_index) const;
-  uint32_t GetRuntimeIdOperand(int operand_index) const;
+  Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
+  Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
   Handle<Object> GetConstantForIndexOperand(int operand_index) const;
 
   // Returns the absolute offset of the branch target at the current
diff --git a/src/interpreter/bytecode-array-writer.cc b/src/interpreter/bytecode-array-writer.cc
index 029688e..c476042 100644
--- a/src/interpreter/bytecode-array-writer.cc
+++ b/src/interpreter/bytecode-array-writer.cc
@@ -4,46 +4,165 @@
 
 #include "src/interpreter/bytecode-array-writer.h"
 
-#include <iomanip>
-#include "src/interpreter/source-position-table.h"
+#include "src/api.h"
+#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/constant-array-builder.h"
+#include "src/log.h"
 
 namespace v8 {
 namespace internal {
 namespace interpreter {
 
 BytecodeArrayWriter::BytecodeArrayWriter(
-    Zone* zone, SourcePositionTableBuilder* source_position_table_builder)
-    : bytecodes_(zone),
+    Isolate* isolate, Zone* zone, ConstantArrayBuilder* constant_array_builder)
+    : isolate_(isolate),
+      bytecodes_(zone),
       max_register_count_(0),
-      source_position_table_builder_(source_position_table_builder) {}
+      unbound_jumps_(0),
+      source_position_table_builder_(isolate, zone),
+      constant_array_builder_(constant_array_builder) {
+  LOG_CODE_EVENT(isolate_, CodeStartLinePosInfoRecordEvent(
+                               source_position_table_builder()));
+}
 
 // override
 BytecodeArrayWriter::~BytecodeArrayWriter() {}
 
 // override
-size_t BytecodeArrayWriter::FlushForOffset() { return bytecodes()->size(); }
+Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
+    int fixed_register_count, int parameter_count,
+    Handle<FixedArray> handler_table) {
+  DCHECK_EQ(0, unbound_jumps_);
+
+  int bytecode_size = static_cast<int>(bytecodes()->size());
+
+  // All locals need a frame slot for the debugger, but may not be
+  // present in generated code.
+  int frame_size_for_locals = fixed_register_count * kPointerSize;
+  int frame_size_used = max_register_count() * kPointerSize;
+  int frame_size = std::max(frame_size_for_locals, frame_size_used);
+  Handle<FixedArray> constant_pool = constant_array_builder()->ToFixedArray();
+  Handle<ByteArray> source_position_table =
+      source_position_table_builder()->ToSourcePositionTable();
+  Handle<BytecodeArray> bytecode_array = isolate_->factory()->NewBytecodeArray(
+      bytecode_size, &bytecodes()->front(), frame_size, parameter_count,
+      constant_pool);
+  bytecode_array->set_handler_table(*handler_table);
+  bytecode_array->set_source_position_table(*source_position_table);
+
+  void* line_info = source_position_table_builder()->DetachJITHandlerData();
+  LOG_CODE_EVENT(isolate_, CodeEndLinePosInfoRecordEvent(
+                               AbstractCode::cast(*bytecode_array), line_info));
+  return bytecode_array;
+}
 
 // override
 void BytecodeArrayWriter::Write(BytecodeNode* node) {
+  DCHECK(!Bytecodes::IsJump(node->bytecode()));
   UpdateSourcePositionTable(node);
   EmitBytecode(node);
 }
 
+// override
+void BytecodeArrayWriter::WriteJump(BytecodeNode* node, BytecodeLabel* label) {
+  DCHECK(Bytecodes::IsJump(node->bytecode()));
+  UpdateSourcePositionTable(node);
+  EmitJump(node, label);
+}
+
+// override
+void BytecodeArrayWriter::BindLabel(BytecodeLabel* label) {
+  size_t current_offset = bytecodes()->size();
+  if (label->is_forward_target()) {
+    // An earlier jump instruction refers to this label. Update its location.
+    PatchJump(current_offset, label->offset());
+    // Now treat as if the label will only be back referred to.
+  }
+  label->bind_to(current_offset);
+}
+
+// override
+void BytecodeArrayWriter::BindLabel(const BytecodeLabel& target,
+                                    BytecodeLabel* label) {
+  DCHECK(!label->is_bound());
+  DCHECK(target.is_bound());
+  if (label->is_forward_target()) {
+    // An earlier jump instruction refers to this label. Update its location.
+    PatchJump(target.offset(), label->offset());
+    // Now treat as if the label will only be back referred to.
+  }
+  label->bind_to(target.offset());
+}
+
 void BytecodeArrayWriter::UpdateSourcePositionTable(
     const BytecodeNode* const node) {
   int bytecode_offset = static_cast<int>(bytecodes()->size());
   const BytecodeSourceInfo& source_info = node->source_info();
   if (source_info.is_valid()) {
-    source_position_table_builder_->AddPosition(bytecode_offset,
-                                                source_info.source_position(),
-                                                source_info.is_statement());
+    source_position_table_builder()->AddPosition(bytecode_offset,
+                                                 source_info.source_position(),
+                                                 source_info.is_statement());
   }
 }
 
+namespace {
+
+OperandScale ScaleForScalableByteOperand(OperandSize operand_size) {
+  STATIC_ASSERT(static_cast<int>(OperandSize::kByte) ==
+                static_cast<int>(OperandScale::kSingle));
+  STATIC_ASSERT(static_cast<int>(OperandSize::kShort) ==
+                static_cast<int>(OperandScale::kDouble));
+  STATIC_ASSERT(static_cast<int>(OperandSize::kQuad) ==
+                static_cast<int>(OperandScale::kQuadruple));
+  return static_cast<OperandScale>(operand_size);
+}
+
+OperandScale OperandScaleForScalableSignedByte(uint32_t operand_value) {
+  int32_t signed_operand = static_cast<int32_t>(operand_value);
+  OperandSize bytes_required = Bytecodes::SizeForSignedOperand(signed_operand);
+  return ScaleForScalableByteOperand(bytes_required);
+}
+
+OperandScale OperandScaleForScalableUnsignedByte(uint32_t operand_value) {
+  OperandSize bytes_required = Bytecodes::SizeForUnsignedOperand(operand_value);
+  return ScaleForScalableByteOperand(bytes_required);
+}
+
+OperandScale GetOperandScale(const BytecodeNode* const node) {
+  const OperandTypeInfo* operand_type_infos =
+      Bytecodes::GetOperandTypeInfos(node->bytecode());
+  OperandScale operand_scale = OperandScale::kSingle;
+  for (int i = 0; i < node->operand_count(); ++i) {
+    switch (operand_type_infos[i]) {
+      case OperandTypeInfo::kScalableSignedByte: {
+        uint32_t operand = node->operand(i);
+        operand_scale =
+            std::max(operand_scale, OperandScaleForScalableSignedByte(operand));
+        break;
+      }
+      case OperandTypeInfo::kScalableUnsignedByte: {
+        uint32_t operand = node->operand(i);
+        operand_scale = std::max(operand_scale,
+                                 OperandScaleForScalableUnsignedByte(operand));
+        break;
+      }
+      case OperandTypeInfo::kFixedUnsignedByte:
+      case OperandTypeInfo::kFixedUnsignedShort:
+        break;
+      case OperandTypeInfo::kNone:
+        UNREACHABLE();
+        break;
+    }
+  }
+  return operand_scale;
+}
+
+}  // namespace
+
 void BytecodeArrayWriter::EmitBytecode(const BytecodeNode* const node) {
   DCHECK_NE(node->bytecode(), Bytecode::kIllegal);
 
-  OperandScale operand_scale = node->operand_scale();
+  OperandScale operand_scale = GetOperandScale(node);
   if (operand_scale != OperandScale::kSingle) {
     Bytecode prefix = Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
     bytecodes()->push_back(Bytecodes::ToByte(prefix));
@@ -54,10 +173,12 @@
 
   int register_operand_bitmap = Bytecodes::GetRegisterOperandBitmap(bytecode);
   const uint32_t* const operands = node->operands();
+  const OperandSize* operand_sizes =
+      Bytecodes::GetOperandSizes(bytecode, operand_scale);
   const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
   for (int i = 0; operand_types[i] != OperandType::kNone; ++i) {
     OperandType operand_type = operand_types[i];
-    switch (Bytecodes::SizeOfOperand(operand_type, operand_scale)) {
+    switch (operand_sizes[i]) {
       case OperandSize::kNone:
         UNREACHABLE();
         break;
@@ -93,11 +214,175 @@
   }
 }
 
-// override
-void BytecodeArrayWriter::FlushBasicBlock() {}
+// static
+Bytecode GetJumpWithConstantOperand(Bytecode jump_bytecode) {
+  switch (jump_bytecode) {
+    case Bytecode::kJump:
+      return Bytecode::kJumpConstant;
+    case Bytecode::kJumpIfTrue:
+      return Bytecode::kJumpIfTrueConstant;
+    case Bytecode::kJumpIfFalse:
+      return Bytecode::kJumpIfFalseConstant;
+    case Bytecode::kJumpIfToBooleanTrue:
+      return Bytecode::kJumpIfToBooleanTrueConstant;
+    case Bytecode::kJumpIfToBooleanFalse:
+      return Bytecode::kJumpIfToBooleanFalseConstant;
+    case Bytecode::kJumpIfNotHole:
+      return Bytecode::kJumpIfNotHoleConstant;
+    case Bytecode::kJumpIfNull:
+      return Bytecode::kJumpIfNullConstant;
+    case Bytecode::kJumpIfUndefined:
+      return Bytecode::kJumpIfUndefinedConstant;
+    default:
+      UNREACHABLE();
+      return Bytecode::kIllegal;
+  }
+}
 
-int BytecodeArrayWriter::GetMaximumFrameSizeUsed() {
-  return max_register_count_ * kPointerSize;
+void BytecodeArrayWriter::PatchJumpWith8BitOperand(size_t jump_location,
+                                                   int delta) {
+  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
+  DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
+  size_t operand_location = jump_location + 1;
+  DCHECK_EQ(bytecodes()->at(operand_location), k8BitJumpPlaceholder);
+  if (Bytecodes::SizeForSignedOperand(delta) == OperandSize::kByte) {
+    // The jump fits within the range of an Imm operand, so cancel
+    // the reservation and jump directly.
+    constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
+    bytecodes()->at(operand_location) = static_cast<uint8_t>(delta);
+  } else {
+    // The jump does not fit within the range of an Imm operand, so
+    // commit reservation putting the offset into the constant pool,
+    // and update the jump instruction and operand.
+    size_t entry = constant_array_builder()->CommitReservedEntry(
+        OperandSize::kByte, handle(Smi::FromInt(delta), isolate()));
+    DCHECK_LE(entry, kMaxUInt32);
+    DCHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<uint32_t>(entry)),
+              OperandSize::kByte);
+    jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
+    bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
+    bytecodes()->at(operand_location) = static_cast<uint8_t>(entry);
+  }
+}
+
+void BytecodeArrayWriter::PatchJumpWith16BitOperand(size_t jump_location,
+                                                    int delta) {
+  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
+  DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
+  size_t operand_location = jump_location + 1;
+  uint8_t operand_bytes[2];
+  if (Bytecodes::SizeForSignedOperand(delta) <= OperandSize::kShort) {
+    constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
+    WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(delta));
+  } else {
+    jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
+    bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
+    size_t entry = constant_array_builder()->CommitReservedEntry(
+        OperandSize::kShort, handle(Smi::FromInt(delta), isolate()));
+    WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
+  }
+  DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
+         bytecodes()->at(operand_location + 1) == k8BitJumpPlaceholder);
+  bytecodes()->at(operand_location++) = operand_bytes[0];
+  bytecodes()->at(operand_location) = operand_bytes[1];
+}
+
+void BytecodeArrayWriter::PatchJumpWith32BitOperand(size_t jump_location,
+                                                    int delta) {
+  DCHECK(Bytecodes::IsJumpImmediate(
+      Bytecodes::FromByte(bytecodes()->at(jump_location))));
+  constant_array_builder()->DiscardReservedEntry(OperandSize::kQuad);
+  uint8_t operand_bytes[4];
+  WriteUnalignedUInt32(operand_bytes, static_cast<uint32_t>(delta));
+  size_t operand_location = jump_location + 1;
+  DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
+         bytecodes()->at(operand_location + 1) == k8BitJumpPlaceholder &&
+         bytecodes()->at(operand_location + 2) == k8BitJumpPlaceholder &&
+         bytecodes()->at(operand_location + 3) == k8BitJumpPlaceholder);
+  bytecodes()->at(operand_location++) = operand_bytes[0];
+  bytecodes()->at(operand_location++) = operand_bytes[1];
+  bytecodes()->at(operand_location++) = operand_bytes[2];
+  bytecodes()->at(operand_location) = operand_bytes[3];
+}
+
+void BytecodeArrayWriter::PatchJump(size_t jump_target, size_t jump_location) {
+  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
+  int delta = static_cast<int>(jump_target - jump_location);
+  int prefix_offset = 0;
+  OperandScale operand_scale = OperandScale::kSingle;
+  if (Bytecodes::IsPrefixScalingBytecode(jump_bytecode)) {
+    // If a prefix scaling bytecode is emitted the target offset is one
+    // less than the case of no prefix scaling bytecode.
+    delta -= 1;
+    prefix_offset = 1;
+    operand_scale = Bytecodes::PrefixBytecodeToOperandScale(jump_bytecode);
+    jump_bytecode =
+        Bytecodes::FromByte(bytecodes()->at(jump_location + prefix_offset));
+  }
+
+  DCHECK(Bytecodes::IsJump(jump_bytecode));
+  switch (operand_scale) {
+    case OperandScale::kSingle:
+      PatchJumpWith8BitOperand(jump_location, delta);
+      break;
+    case OperandScale::kDouble:
+      PatchJumpWith16BitOperand(jump_location + prefix_offset, delta);
+      break;
+    case OperandScale::kQuadruple:
+      PatchJumpWith32BitOperand(jump_location + prefix_offset, delta);
+      break;
+    default:
+      UNREACHABLE();
+  }
+  unbound_jumps_--;
+}
+
+void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
+  DCHECK(Bytecodes::IsJump(node->bytecode()));
+  DCHECK_EQ(0, node->operand(0));
+
+  size_t current_offset = bytecodes()->size();
+
+  if (label->is_bound()) {
+    CHECK_GE(current_offset, label->offset());
+    CHECK_LE(current_offset, static_cast<size_t>(kMaxInt));
+    // Label has been bound already so this is a backwards jump.
+    size_t abs_delta = current_offset - label->offset();
+    int delta = -static_cast<int>(abs_delta);
+    OperandSize operand_size = Bytecodes::SizeForSignedOperand(delta);
+    if (operand_size > OperandSize::kByte) {
+      // Adjust for scaling byte prefix for wide jump offset.
+      DCHECK_LE(delta, 0);
+      delta -= 1;
+    }
+    node->set_bytecode(node->bytecode(), delta);
+  } else {
+    // The label has not yet been bound so this is a forward reference
+    // that will be patched when the label is bound. We create a
+    // reservation in the constant pool so the jump can be patched
+    // when the label is bound. The reservation means the maximum size
+    // of the operand for the constant is known and the jump can
+    // be emitted into the bytecode stream with space for the operand.
+    unbound_jumps_++;
+    label->set_referrer(current_offset);
+    OperandSize reserved_operand_size =
+        constant_array_builder()->CreateReservedEntry();
+    switch (reserved_operand_size) {
+      case OperandSize::kNone:
+        UNREACHABLE();
+        break;
+      case OperandSize::kByte:
+        node->set_bytecode(node->bytecode(), k8BitJumpPlaceholder);
+        break;
+      case OperandSize::kShort:
+        node->set_bytecode(node->bytecode(), k16BitJumpPlaceholder);
+        break;
+      case OperandSize::kQuad:
+        node->set_bytecode(node->bytecode(), k32BitJumpPlaceholder);
+        break;
+    }
+  }
+  EmitBytecode(node);
 }
 
 }  // namespace interpreter
diff --git a/src/interpreter/bytecode-array-writer.h b/src/interpreter/bytecode-array-writer.h
index b1303c9..76d881e 100644
--- a/src/interpreter/bytecode-array-writer.h
+++ b/src/interpreter/bytecode-array-writer.h
@@ -6,40 +6,70 @@
 #define V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
 
 #include "src/interpreter/bytecode-pipeline.h"
+#include "src/interpreter/source-position-table.h"
 
 namespace v8 {
 namespace internal {
 namespace interpreter {
 
+class BytecodeLabel;
 class SourcePositionTableBuilder;
+class ConstantArrayBuilder;
 
 // Class for emitting bytecode as the final stage of the bytecode
 // generation pipeline.
 class BytecodeArrayWriter final : public BytecodePipelineStage {
  public:
-  BytecodeArrayWriter(
-      Zone* zone, SourcePositionTableBuilder* source_position_table_builder);
+  BytecodeArrayWriter(Isolate* isolate, Zone* zone,
+                      ConstantArrayBuilder* constant_array_builder);
   virtual ~BytecodeArrayWriter();
 
+  // BytecodePipelineStage interface.
   void Write(BytecodeNode* node) override;
-  size_t FlushForOffset() override;
-  void FlushBasicBlock() override;
-
-  // Get the bytecode vector.
-  ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
-
-  // Returns the size in bytes of the frame associated with the
-  // bytecode written.
-  int GetMaximumFrameSizeUsed();
+  void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
+  void BindLabel(BytecodeLabel* label) override;
+  void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
+  Handle<BytecodeArray> ToBytecodeArray(
+      int fixed_register_count, int parameter_count,
+      Handle<FixedArray> handler_table) override;
 
  private:
+  // Constants that act as placeholders for jump operands to be
+  // patched. These have operand sizes that match the sizes of
+  // reserved constant pool entries.
+  const uint32_t k8BitJumpPlaceholder = 0x7f;
+  const uint32_t k16BitJumpPlaceholder =
+      k8BitJumpPlaceholder | (k8BitJumpPlaceholder << 8);
+  const uint32_t k32BitJumpPlaceholder =
+      k16BitJumpPlaceholder | (k16BitJumpPlaceholder << 16);
+
+  void PatchJump(size_t jump_target, size_t jump_location);
+  void PatchJumpWith8BitOperand(size_t jump_location, int delta);
+  void PatchJumpWith16BitOperand(size_t jump_location, int delta);
+  void PatchJumpWith32BitOperand(size_t jump_location, int delta);
+
   void EmitBytecode(const BytecodeNode* const node);
+  void EmitJump(BytecodeNode* node, BytecodeLabel* label);
   void UpdateSourcePositionTable(const BytecodeNode* const node);
 
+  Isolate* isolate() { return isolate_; }
+  ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
+  SourcePositionTableBuilder* source_position_table_builder() {
+    return &source_position_table_builder_;
+  }
+  ConstantArrayBuilder* constant_array_builder() {
+    return constant_array_builder_;
+  }
+  int max_register_count() { return max_register_count_; }
+
+  Isolate* isolate_;
   ZoneVector<uint8_t> bytecodes_;
   int max_register_count_;
-  SourcePositionTableBuilder* source_position_table_builder_;
+  int unbound_jumps_;
+  SourcePositionTableBuilder source_position_table_builder_;
+  ConstantArrayBuilder* constant_array_builder_;
 
+  friend class BytecodeArrayWriterUnittest;
   DISALLOW_COPY_AND_ASSIGN(BytecodeArrayWriter);
 };
 
diff --git a/src/interpreter/bytecode-dead-code-optimizer.cc b/src/interpreter/bytecode-dead-code-optimizer.cc
new file mode 100644
index 0000000..964d2a8
--- /dev/null
+++ b/src/interpreter/bytecode-dead-code-optimizer.cc
@@ -0,0 +1,77 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-dead-code-optimizer.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeDeadCodeOptimizer::BytecodeDeadCodeOptimizer(
+    BytecodePipelineStage* next_stage)
+    : next_stage_(next_stage), exit_seen_in_block_(false) {}
+
+// override
+Handle<BytecodeArray> BytecodeDeadCodeOptimizer::ToBytecodeArray(
+    int fixed_register_count, int parameter_count,
+    Handle<FixedArray> handler_table) {
+  return next_stage_->ToBytecodeArray(fixed_register_count, parameter_count,
+                                      handler_table);
+}
+
+// override
+void BytecodeDeadCodeOptimizer::Write(BytecodeNode* node) {
+  // Don't emit dead code.
+  if (exit_seen_in_block_) return;
+
+  switch (node->bytecode()) {
+    case Bytecode::kReturn:
+    case Bytecode::kThrow:
+    case Bytecode::kReThrow:
+      exit_seen_in_block_ = true;
+      break;
+    default:
+      break;
+  }
+
+  next_stage_->Write(node);
+}
+
+// override
+void BytecodeDeadCodeOptimizer::WriteJump(BytecodeNode* node,
+                                          BytecodeLabel* label) {
+  // Don't emit dead code.
+  // TODO(rmcilroy): For forward jumps we could mark the label as dead, thereby
+  // avoiding emitting dead code when we bind the label.
+  if (exit_seen_in_block_) return;
+
+  switch (node->bytecode()) {
+    case Bytecode::kJump:
+    case Bytecode::kJumpConstant:
+      exit_seen_in_block_ = true;
+      break;
+    default:
+      break;
+  }
+
+  next_stage_->WriteJump(node, label);
+}
+
+// override
+void BytecodeDeadCodeOptimizer::BindLabel(BytecodeLabel* label) {
+  next_stage_->BindLabel(label);
+  exit_seen_in_block_ = false;
+}
+
+// override
+void BytecodeDeadCodeOptimizer::BindLabel(const BytecodeLabel& target,
+                                          BytecodeLabel* label) {
+  next_stage_->BindLabel(target, label);
+  // exit_seen_in_block_ was reset when target was bound, so shouldn't be
+  // changed here.
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/bytecode-dead-code-optimizer.h b/src/interpreter/bytecode-dead-code-optimizer.h
new file mode 100644
index 0000000..8d68e54
--- /dev/null
+++ b/src/interpreter/bytecode-dead-code-optimizer.h
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_DEAD_CODE_OPTIMIZER_H_
+#define V8_INTERPRETER_BYTECODE_DEAD_CODE_OPTIMIZER_H_
+
+#include "src/interpreter/bytecode-pipeline.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// An optimization stage for eliminating obviously dead code in bytecode
+// generation.
+class BytecodeDeadCodeOptimizer final : public BytecodePipelineStage,
+                                        public ZoneObject {
+ public:
+  explicit BytecodeDeadCodeOptimizer(BytecodePipelineStage* next_stage);
+
+  // BytecodePipelineStage interface.
+  void Write(BytecodeNode* node) override;
+  void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
+  void BindLabel(BytecodeLabel* label) override;
+  void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
+  Handle<BytecodeArray> ToBytecodeArray(
+      int fixed_register_count, int parameter_count,
+      Handle<FixedArray> handler_table) override;
+
+ private:
+  BytecodePipelineStage* next_stage_;
+  bool exit_seen_in_block_;
+
+  DISALLOW_COPY_AND_ASSIGN(BytecodeDeadCodeOptimizer);
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_BYTECODE_DEAD_CODE_OPTIMIZER_H_
diff --git a/src/interpreter/bytecode-generator.cc b/src/interpreter/bytecode-generator.cc
index 650234a..b7cfd49 100644
--- a/src/interpreter/bytecode-generator.cc
+++ b/src/interpreter/bytecode-generator.cc
@@ -17,7 +17,6 @@
 namespace internal {
 namespace interpreter {
 
-
 // Scoped class tracking context objects created by the visitor. Represents
 // mutations of the context chain within the function body, allowing pushing and
 // popping of the current {context_register} during visitation.
@@ -88,7 +87,6 @@
   bool should_pop_context_;
 };
 
-
 // Scoped class for tracking control statements entered by the
 // visitor. The pattern derives AstGraphBuilder::ControlScope.
 class BytecodeGenerator::ControlScope BASE_EMBEDDED {
@@ -124,7 +122,6 @@
   DISALLOW_COPY_AND_ASSIGN(ControlScope);
 };
 
-
 // Helper class for a try-finally control scope. It can record intercepted
 // control-flow commands that cause entry into a finally-block, and re-apply
 // them after again leaving that block. Special tokens are used to identify
@@ -203,7 +200,6 @@
   Register result_register_;
 };
 
-
 // Scoped class for dealing with control flow reaching the function level.
 class BytecodeGenerator::ControlScopeForTopLevel final
     : public BytecodeGenerator::ControlScope {
@@ -228,7 +224,6 @@
   }
 };
 
-
 // Scoped class for enabling break inside blocks and switch blocks.
 class BytecodeGenerator::ControlScopeForBreakable final
     : public BytecodeGenerator::ControlScope {
@@ -260,7 +255,6 @@
   BreakableControlFlowBuilder* control_builder_;
 };
 
-
 // Scoped class for enabling 'break' and 'continue' in iteration
 // constructs, e.g. do...while, while..., for...
 class BytecodeGenerator::ControlScopeForIteration final
@@ -295,7 +289,6 @@
   LoopBuilder* loop_builder_;
 };
 
-
 // Scoped class for enabling 'throw' in try-catch constructs.
 class BytecodeGenerator::ControlScopeForTryCatch final
     : public BytecodeGenerator::ControlScope {
@@ -324,7 +317,6 @@
   }
 };
 
-
 // Scoped class for enabling control flow through try-finally constructs.
 class BytecodeGenerator::ControlScopeForTryFinally final
     : public BytecodeGenerator::ControlScope {
@@ -360,7 +352,6 @@
   DeferredCommands* commands_;
 };
 
-
 void BytecodeGenerator::ControlScope::PerformCommand(Command command,
                                                      Statement* statement) {
   ControlScope* current = this;
@@ -383,7 +374,6 @@
   UNREACHABLE();
 }
 
-
 class BytecodeGenerator::RegisterAllocationScope {
  public:
   explicit RegisterAllocationScope(BytecodeGenerator* generator)
@@ -441,7 +431,6 @@
   DISALLOW_COPY_AND_ASSIGN(RegisterAllocationScope);
 };
 
-
 // Scoped base class for determining where the result of an expression
 // is stored.
 class BytecodeGenerator::ExpressionResultScope {
@@ -489,7 +478,6 @@
   DISALLOW_COPY_AND_ASSIGN(ExpressionResultScope);
 };
 
-
 // Scoped class used when the result of the current expression is not
 // expected to produce a result.
 class BytecodeGenerator::EffectResultScope final
@@ -504,7 +492,6 @@
   virtual void SetResultInRegister(Register reg) {}
 };
 
-
 // Scoped class used when the result of the current expression to be
 // evaluated should go into the interpreter's accumulator register.
 class BytecodeGenerator::AccumulatorResultScope final
@@ -521,7 +508,6 @@
   }
 };
 
-
 // Scoped class used when the result of the current expression to be
 // evaluated should go into an interpreter register.
 class BytecodeGenerator::RegisterResultScope final
@@ -585,7 +571,7 @@
 
   RegisterAllocationScope register_scope(this);
 
-  if (IsGeneratorFunction(info()->literal()->kind())) {
+  if (IsResumableFunction(info()->literal()->kind())) {
     generator_state_ = register_allocator()->NewRegister();
     VisitGeneratorPrologue();
   }
@@ -613,7 +599,6 @@
   return builder()->ToBytecodeArray();
 }
 
-
 void BytecodeGenerator::MakeBytecodeBody() {
   // Build the arguments object if it is used.
   VisitArgumentsObject(scope()->arguments());
@@ -656,13 +641,7 @@
         .JumpIfTrue(&(targets[i]));
   }
 
-  RegisterAllocationScope register_scope(this);
-  Register reason = register_allocator()->NewRegister();
-  BailoutReason bailout_reason = BailoutReason::kInvalidJumpTableIndex;
-  builder()
-      ->LoadLiteral(Smi::FromInt(static_cast<int>(bailout_reason)))
-      .StoreAccumulatorInRegister(reason)
-      .CallRuntime(Runtime::kAbort, reason, 1);
+  BuildAbort(BailoutReason::kInvalidJumpTableIndex);
 }
 
 void BytecodeGenerator::VisitIterationHeader(IterationStatement* stmt,
@@ -717,10 +696,13 @@
   BuildIndexedJump(generator_state_, 0, generator_resume_points_.size(),
                    generator_resume_points_);
 
-  builder()->Bind(&regular_call);
+  builder()
+      ->Bind(&regular_call)
+      .LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
+      .StoreAccumulatorInRegister(generator_state_);
   // This is a regular call. Fall through to the ordinary function prologue,
-  // after which we will run into the generator object creation and the initial
-  // yield (both inserted by the parser).
+  // after which we will run into the generator object creation and other extra
+  // code inserted by the parser.
 }
 
 void BytecodeGenerator::VisitBlock(Block* stmt) {
@@ -734,7 +716,6 @@
   }
 }
 
-
 void BytecodeGenerator::VisitBlockDeclarationsAndStatements(Block* stmt) {
   BlockBuilder block_builder(builder());
   ControlScopeForBreakable execution_control(this, stmt, &block_builder);
@@ -745,7 +726,6 @@
   if (stmt->labels() != nullptr) block_builder.EndBlock();
 }
 
-
 void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
   Variable* variable = decl->proxy()->var();
   VariableMode mode = decl->mode();
@@ -780,34 +760,20 @@
       }
       break;
     case VariableLocation::LOOKUP: {
-      DCHECK(IsDeclaredVariableMode(mode));
+      DCHECK_EQ(VAR, mode);
+      DCHECK(!hole_init);
 
-      register_allocator()->PrepareForConsecutiveAllocations(3);
-      Register name = register_allocator()->NextConsecutiveRegister();
-      Register init_value = register_allocator()->NextConsecutiveRegister();
-      Register attributes = register_allocator()->NextConsecutiveRegister();
+      Register name = register_allocator()->NewRegister();
 
-      builder()->LoadLiteral(variable->name()).StoreAccumulatorInRegister(name);
-      if (hole_init) {
-        builder()->LoadTheHole().StoreAccumulatorInRegister(init_value);
-      } else {
-        // For variables, we must not use an initial value (such as 'undefined')
-        // because we may have a (legal) redeclaration and we must not destroy
-        // the current value.
-        builder()
-            ->LoadLiteral(Smi::FromInt(0))
-            .StoreAccumulatorInRegister(init_value);
-      }
       builder()
-          ->LoadLiteral(Smi::FromInt(variable->DeclarationPropertyAttributes()))
-          .StoreAccumulatorInRegister(attributes)
-          .CallRuntime(Runtime::kDeclareLookupSlot, name, 3);
+          ->LoadLiteral(variable->name())
+          .StoreAccumulatorInRegister(name)
+          .CallRuntime(Runtime::kDeclareEvalVar, name, 1);
       break;
     }
   }
 }
 
-
 void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
   Variable* variable = decl->proxy()->var();
   switch (variable->location()) {
@@ -838,33 +804,26 @@
       break;
     }
     case VariableLocation::LOOKUP: {
-      register_allocator()->PrepareForConsecutiveAllocations(3);
+      register_allocator()->PrepareForConsecutiveAllocations(2);
       Register name = register_allocator()->NextConsecutiveRegister();
       Register literal = register_allocator()->NextConsecutiveRegister();
-      Register attributes = register_allocator()->NextConsecutiveRegister();
       builder()->LoadLiteral(variable->name()).StoreAccumulatorInRegister(name);
 
       VisitForAccumulatorValue(decl->fun());
-      builder()
-          ->StoreAccumulatorInRegister(literal)
-          .LoadLiteral(Smi::FromInt(variable->DeclarationPropertyAttributes()))
-          .StoreAccumulatorInRegister(attributes)
-          .CallRuntime(Runtime::kDeclareLookupSlot, name, 3);
+      builder()->StoreAccumulatorInRegister(literal).CallRuntime(
+          Runtime::kDeclareEvalFunction, name, 2);
     }
   }
 }
 
-
 void BytecodeGenerator::VisitImportDeclaration(ImportDeclaration* decl) {
   UNIMPLEMENTED();
 }
 
-
 void BytecodeGenerator::VisitExportDeclaration(ExportDeclaration* decl) {
   UNIMPLEMENTED();
 }
 
-
 void BytecodeGenerator::VisitDeclarations(
     ZoneList<Declaration*>* declarations) {
   RegisterAllocationScope register_scope(this);
@@ -893,7 +852,6 @@
   globals()->clear();
 }
 
-
 void BytecodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
   for (int i = 0; i < statements->length(); i++) {
     // Allocate an outer register allocations scope for the statement.
@@ -904,17 +862,14 @@
   }
 }
 
-
 void BytecodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
   builder()->SetStatementPosition(stmt);
   VisitForEffect(stmt->expression());
 }
 
-
 void BytecodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
 }
 
-
 void BytecodeGenerator::VisitIfStatement(IfStatement* stmt) {
   builder()->SetStatementPosition(stmt);
   BytecodeLabel else_label, end_label;
@@ -944,32 +899,27 @@
   }
 }
 
-
 void BytecodeGenerator::VisitSloppyBlockFunctionStatement(
     SloppyBlockFunctionStatement* stmt) {
   Visit(stmt->statement());
 }
 
-
 void BytecodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
   builder()->SetStatementPosition(stmt);
   execution_control()->Continue(stmt->target());
 }
 
-
 void BytecodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
   builder()->SetStatementPosition(stmt);
   execution_control()->Break(stmt->target());
 }
 
-
 void BytecodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
   builder()->SetStatementPosition(stmt);
   VisitForAccumulatorValue(stmt->expression());
   execution_control()->ReturnAccumulator();
 }
 
-
 void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
   builder()->SetStatementPosition(stmt);
   VisitForAccumulatorValue(stmt->expression());
@@ -978,7 +928,6 @@
   VisitInScope(stmt->statement(), stmt->scope());
 }
 
-
 void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
   // We need this scope because we visit for register values. We have to
   // maintain a execution result scope where registers can be allocated.
@@ -1029,7 +978,6 @@
   switch_builder.SetBreakTarget(done_label);
 }
 
-
 void BytecodeGenerator::VisitCaseClause(CaseClause* clause) {
   // Handled entirely in VisitSwitchStatement.
   UNREACHABLE();
@@ -1078,7 +1026,6 @@
   loop_builder.EndLoop();
 }
 
-
 void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
   if (stmt->init() != nullptr) {
     Visit(stmt->init());
@@ -1105,7 +1052,6 @@
   loop_builder.EndLoop();
 }
 
-
 void BytecodeGenerator::VisitForInAssignment(Expression* expr,
                                              FeedbackVectorSlot slot) {
   DCHECK(expr->IsValidReferenceExpression());
@@ -1179,7 +1125,6 @@
   }
 }
 
-
 void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   if (stmt->subject()->IsNullLiteral() ||
       stmt->subject()->IsUndefinedLiteral()) {
@@ -1231,7 +1176,6 @@
   builder()->Bind(&subject_undefined_label);
 }
 
-
 void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
   LoopBuilder loop_builder(builder());
   ControlScopeForIteration control_scope(this, stmt, &loop_builder);
@@ -1251,7 +1195,6 @@
   loop_builder.EndLoop();
 }
 
-
 void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
   TryCatchBuilder try_control_builder(builder());
   Register no_reg;
@@ -1288,7 +1231,6 @@
   try_control_builder.EndCatch();
 }
 
-
 void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
   TryFinallyBuilder try_control_builder(builder(), IsInsideTryCatch());
   Register no_reg;
@@ -1353,13 +1295,11 @@
   commands.ApplyDeferredCommands();
 }
 
-
 void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
   builder()->SetStatementPosition(stmt);
   builder()->Debugger();
 }
 
-
 void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
   // Find or build a shared function info.
   Handle<SharedFunctionInfo> shared_info =
@@ -1372,7 +1312,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
   if (expr->scope()->ContextLocalCount() > 0) {
     VisitNewLocalBlockContext(expr->scope());
@@ -1530,13 +1469,11 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitDoExpression(DoExpression* expr) {
   VisitBlock(expr->block());
   VisitVariableProxy(expr->result());
 }
 
-
 void BytecodeGenerator::VisitConditional(Conditional* expr) {
   // TODO(rmcilroy): Spot easy cases where there code would not need to
   // emit the then block or the else block, e.g. condition is
@@ -1557,21 +1494,20 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitLiteral(Literal* expr) {
   if (!execution_result()->IsEffect()) {
     Handle<Object> value = expr->value();
     if (value->IsSmi()) {
       builder()->LoadLiteral(Smi::cast(*value));
-    } else if (value->IsUndefined()) {
+    } else if (value->IsUndefined(isolate())) {
       builder()->LoadUndefined();
-    } else if (value->IsTrue()) {
+    } else if (value->IsTrue(isolate())) {
       builder()->LoadTrue();
-    } else if (value->IsFalse()) {
+    } else if (value->IsFalse(isolate())) {
       builder()->LoadFalse();
-    } else if (value->IsNull()) {
+    } else if (value->IsNull(isolate())) {
       builder()->LoadNull();
-    } else if (value->IsTheHole()) {
+    } else if (value->IsTheHole(isolate())) {
       builder()->LoadTheHole();
     } else {
       builder()->LoadLiteral(value);
@@ -1580,7 +1516,6 @@
   }
 }
 
-
 void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
   // Materialize a regular expression literal.
   builder()->CreateRegExpLiteral(expr->pattern(), expr->literal_index(),
@@ -1588,7 +1523,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   // Copy the literal boilerplate.
   int fast_clone_properties_count = 0;
@@ -1792,7 +1726,6 @@
   execution_result()->SetResultInRegister(literal);
 }
 
-
 void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   // Deep-copy the literal boilerplate.
   builder()->CreateArrayLiteral(expr->constant_elements(),
@@ -1832,7 +1765,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
   builder()->SetExpressionPosition(proxy);
   VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
@@ -1868,8 +1800,7 @@
     }
     case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
-      builder()->LoadGlobal(variable->name(), feedback_index(slot),
-                            typeof_mode);
+      builder()->LoadGlobal(feedback_index(slot), typeof_mode);
       execution_result()->SetResultInAccumulator();
       break;
     }
@@ -1958,6 +1889,15 @@
   builder()->CallRuntime(function_id, receiver, 4);
 }
 
+void BytecodeGenerator::BuildAbort(BailoutReason bailout_reason) {
+  RegisterAllocationScope register_scope(this);
+  Register reason = register_allocator()->NewRegister();
+  builder()
+      ->LoadLiteral(Smi::FromInt(static_cast<int>(bailout_reason)))
+      .StoreAccumulatorInRegister(reason)
+      .CallRuntime(Runtime::kAbort, reason, 1);
+}
+
 void BytecodeGenerator::BuildThrowReferenceError(Handle<String> name) {
   RegisterAllocationScope register_scope(this);
   Register name_reg = register_allocator()->NewRegister();
@@ -2125,7 +2065,6 @@
   }
 }
 
-
 void BytecodeGenerator::VisitAssignment(Assignment* expr) {
   DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
   Register object, key, home_object, value;
@@ -2297,12 +2236,12 @@
 
     Register input = register_allocator()->NewRegister();
     builder()
-        ->CallRuntime(Runtime::kGeneratorGetInput, generator, 1)
+        ->CallRuntime(Runtime::kInlineGeneratorGetInputOrDebugPos, generator, 1)
         .StoreAccumulatorInRegister(input);
 
     Register resume_mode = register_allocator()->NewRegister();
     builder()
-        ->CallRuntime(Runtime::kGeneratorGetResumeMode, generator, 1)
+        ->CallRuntime(Runtime::kInlineGeneratorGetResumeMode, generator, 1)
         .StoreAccumulatorInRegister(resume_mode);
 
     // Now dispatch on resume mode.
@@ -2329,14 +2268,13 @@
           ->MoveRegister(input, value)
           .LoadTrue()
           .StoreAccumulatorInRegister(done)
-          .CallRuntime(Runtime::kCreateIterResultObject, value, 2);
+          .CallRuntime(Runtime::kInlineCreateIterResultObject, value, 2);
       execution_control()->ReturnAccumulator();
     }
 
     builder()->Bind(&resume_with_throw);
-    builder()
-        ->LoadAccumulatorWithRegister(input)
-        .Throw();
+    builder()->SetExpressionPosition(expr);
+    builder()->LoadAccumulatorWithRegister(input).Throw();
 
     builder()->Bind(&resume_with_next);
     builder()->LoadAccumulatorWithRegister(input);
@@ -2355,7 +2293,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
   LhsKind property_kind = Property::GetAssignType(expr);
   FeedbackVectorSlot slot = expr->PropertyFeedbackSlot();
@@ -2649,7 +2586,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   if (expr->is_jsruntime()) {
@@ -2670,14 +2606,12 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitVoid(UnaryOperation* expr) {
   VisitForEffect(expr->expression());
   builder()->LoadUndefined();
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitTypeOf(UnaryOperation* expr) {
   if (expr->expression()->IsVariableProxy()) {
     // Typeof does not throw a reference error on global variables, hence we
@@ -2692,14 +2626,12 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitNot(UnaryOperation* expr) {
   VisitForAccumulatorValue(expr->expression());
   builder()->LogicalNot();
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
   switch (expr->op()) {
     case Token::Value::NOT:
@@ -2725,7 +2657,6 @@
   }
 }
 
-
 void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
   if (expr->expression()->IsProperty()) {
     // Delete of an object property is allowed both in sloppy
@@ -2787,7 +2718,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
   DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
 
@@ -2909,7 +2839,6 @@
   }
 }
 
-
 void BytecodeGenerator::VisitBinaryOperation(BinaryOperation* binop) {
   switch (binop->op()) {
     case Token::COMMA:
@@ -2927,7 +2856,6 @@
   }
 }
 
-
 void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Register lhs = VisitForRegisterValue(expr->left());
   VisitForAccumulatorValue(expr->right());
@@ -2936,7 +2864,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
   Register lhs = VisitForRegisterValue(expr->left());
   VisitForAccumulatorValue(expr->right());
@@ -2944,39 +2871,32 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitSpread(Spread* expr) { UNREACHABLE(); }
 
-
 void BytecodeGenerator::VisitEmptyParentheses(EmptyParentheses* expr) {
   UNREACHABLE();
 }
 
-
 void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) {
   execution_result()->SetResultInRegister(Register::function_closure());
 }
 
-
 void BytecodeGenerator::VisitSuperCallReference(SuperCallReference* expr) {
   // Handled by VisitCall().
   UNREACHABLE();
 }
 
-
 void BytecodeGenerator::VisitSuperPropertyReference(
     SuperPropertyReference* expr) {
   builder()->CallRuntime(Runtime::kThrowUnsupportedSuperError, Register(0), 0);
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitCommaExpression(BinaryOperation* binop) {
   VisitForEffect(binop->left());
   Visit(binop->right());
 }
 
-
 void BytecodeGenerator::VisitLogicalOrExpression(BinaryOperation* binop) {
   Expression* left = binop->left();
   Expression* right = binop->right();
@@ -2995,7 +2915,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitLogicalAndExpression(BinaryOperation* binop) {
   Expression* left = binop->left();
   Expression* right = binop->right();
@@ -3014,12 +2933,10 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitRewritableExpression(RewritableExpression* expr) {
   Visit(expr->expression());
 }
 
-
 void BytecodeGenerator::VisitNewLocalFunctionContext() {
   AccumulatorResultScope accumulator_execution_result(this);
   Scope* scope = this->scope();
@@ -3043,7 +2960,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitBuildLocalActivationContext() {
   Scope* scope = this->scope();
 
@@ -3072,7 +2988,6 @@
   }
 }
 
-
 void BytecodeGenerator::VisitNewLocalBlockContext(Scope* scope) {
   AccumulatorResultScope accumulator_execution_result(this);
   DCHECK(scope->is_block_scope());
@@ -3126,7 +3041,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitObjectLiteralAccessor(
     Register home_object, ObjectLiteralProperty* property, Register value_out) {
   // TODO(rmcilroy): Replace value_out with VisitForRegister();
@@ -3152,7 +3066,6 @@
   }
 }
 
-
 void BytecodeGenerator::VisitArgumentsObject(Variable* variable) {
   if (variable == nullptr) return;
 
@@ -3187,7 +3100,6 @@
   VisitVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid());
 }
 
-
 void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
   if (variable == nullptr) return;
 
@@ -3196,7 +3108,6 @@
   VisitVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid());
 }
 
-
 void BytecodeGenerator::VisitFunctionClosureForContext() {
   AccumulatorResultScope accumulator_execution_result(this);
   Scope* closure_scope = execution_context()->scope()->ClosureScope();
@@ -3223,7 +3134,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 // Visits the expression |expr| and places the result in the accumulator.
 void BytecodeGenerator::VisitForAccumulatorValue(Expression* expr) {
   AccumulatorResultScope accumulator_scope(this);
@@ -3244,7 +3154,6 @@
   Visit(expr);
 }
 
-
 // Visits the expression |expr| and returns the register containing
 // the expression result.
 Register BytecodeGenerator::VisitForRegisterValue(Expression* expr) {
@@ -3268,14 +3177,12 @@
   Visit(stmt);
 }
 
-
 LanguageMode BytecodeGenerator::language_mode() const {
   return execution_context()->scope()->language_mode();
 }
 
-
 int BytecodeGenerator::feedback_index(FeedbackVectorSlot slot) const {
-  return info()->shared_info()->feedback_vector()->GetIndex(slot);
+  return TypeFeedbackVector::GetIndex(slot);
 }
 
 }  // namespace interpreter
diff --git a/src/interpreter/bytecode-generator.h b/src/interpreter/bytecode-generator.h
index 0dcc9be..3adca6b 100644
--- a/src/interpreter/bytecode-generator.h
+++ b/src/interpreter/bytecode-generator.h
@@ -7,6 +7,7 @@
 
 #include "src/ast/ast.h"
 #include "src/interpreter/bytecode-array-builder.h"
+#include "src/interpreter/bytecode-label.h"
 #include "src/interpreter/bytecodes.h"
 
 namespace v8 {
@@ -105,6 +106,7 @@
   void BuildKeyedSuperPropertyLoad(Register receiver, Register home_object,
                                    Register key);
 
+  void BuildAbort(BailoutReason bailout_reason);
   void BuildThrowIfHole(Handle<String> name);
   void BuildThrowIfNotHole(Handle<String> name);
   void BuildThrowReassignConstant(Handle<String> name);
diff --git a/src/interpreter/bytecode-label.h b/src/interpreter/bytecode-label.h
new file mode 100644
index 0000000..2f89c48
--- /dev/null
+++ b/src/interpreter/bytecode-label.h
@@ -0,0 +1,56 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_LABEL_H_
+#define V8_INTERPRETER_BYTECODE_LABEL_H_
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// A label representing a branch target in a bytecode array. When a
+// label is bound, it represents a known position in the bytecode
+// array. For labels that are forward references there can be at most
+// one reference whilst it is unbound.
+class BytecodeLabel final {
+ public:
+  BytecodeLabel() : bound_(false), offset_(kInvalidOffset) {}
+
+  bool is_bound() const { return bound_; }
+  size_t offset() const { return offset_; }
+
+ private:
+  static const size_t kInvalidOffset = static_cast<size_t>(-1);
+
+  void bind_to(size_t offset) {
+    DCHECK(!bound_ && offset != kInvalidOffset);
+    offset_ = offset;
+    bound_ = true;
+  }
+
+  void set_referrer(size_t offset) {
+    DCHECK(!bound_ && offset != kInvalidOffset && offset_ == kInvalidOffset);
+    offset_ = offset;
+  }
+
+  bool is_forward_target() const {
+    return offset() != kInvalidOffset && !is_bound();
+  }
+
+  // There are three states for a label:
+  //                    bound_   offset_
+  //  UNSET             false    kInvalidOffset
+  //  FORWARD_TARGET    false    Offset of referring jump
+  //  BACKWARD_TARGET    true    Offset of label in bytecode array when bound
+  bool bound_;
+  size_t offset_;
+
+  friend class BytecodeArrayWriter;
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_BYTECODE_LABEL_H_
diff --git a/src/interpreter/bytecode-peephole-optimizer.cc b/src/interpreter/bytecode-peephole-optimizer.cc
index 803fc23..1108d83 100644
--- a/src/interpreter/bytecode-peephole-optimizer.cc
+++ b/src/interpreter/bytecode-peephole-optimizer.cc
@@ -15,12 +15,57 @@
 BytecodePeepholeOptimizer::BytecodePeepholeOptimizer(
     ConstantArrayBuilder* constant_array_builder,
     BytecodePipelineStage* next_stage)
-    : constant_array_builder_(constant_array_builder),
-      next_stage_(next_stage),
-      last_is_discardable_(false) {
+    : constant_array_builder_(constant_array_builder), next_stage_(next_stage) {
   InvalidateLast();
 }
 
+// override
+Handle<BytecodeArray> BytecodePeepholeOptimizer::ToBytecodeArray(
+    int fixed_register_count, int parameter_count,
+    Handle<FixedArray> handler_table) {
+  Flush();
+  return next_stage_->ToBytecodeArray(fixed_register_count, parameter_count,
+                                      handler_table);
+}
+
+// override
+void BytecodePeepholeOptimizer::Write(BytecodeNode* node) {
+  node = OptimizeAndEmitLast(node);
+  if (node != nullptr) {
+    SetLast(node);
+  }
+}
+
+// override
+void BytecodePeepholeOptimizer::WriteJump(BytecodeNode* node,
+                                          BytecodeLabel* label) {
+  node = OptimizeAndEmitLast(node);
+  next_stage_->WriteJump(node, label);
+}
+
+// override
+void BytecodePeepholeOptimizer::BindLabel(BytecodeLabel* label) {
+  Flush();
+  next_stage_->BindLabel(label);
+}
+
+// override
+void BytecodePeepholeOptimizer::BindLabel(const BytecodeLabel& target,
+                                          BytecodeLabel* label) {
+  // There is no need to flush here, it will have been flushed when |target|
+  // was bound.
+  next_stage_->BindLabel(target, label);
+}
+
+void BytecodePeepholeOptimizer::Flush() {
+  // TODO(oth/rmcilroy): We could check CanElideLast() here to potentially
+  // eliminate last rather than writing it.
+  if (LastIsValid()) {
+    next_stage_->Write(&last_);
+    InvalidateLast();
+  }
+}
+
 void BytecodePeepholeOptimizer::InvalidateLast() {
   last_.set_bytecode(Bytecode::kIllegal);
 }
@@ -31,51 +76,6 @@
 
 void BytecodePeepholeOptimizer::SetLast(const BytecodeNode* const node) {
   last_.Clone(node);
-  last_is_discardable_ = true;
-}
-
-// override
-size_t BytecodePeepholeOptimizer::FlushForOffset() {
-  size_t buffered_size = next_stage_->FlushForOffset();
-  if (LastIsValid()) {
-    if (last_.bytecode() == Bytecode::kNop &&
-        !last_.source_info().is_statement()) {
-      // The Nop can be dropped as it doesn't have a statement
-      // position for the debugger and doesn't have any effects by
-      // definition.
-      InvalidateLast();
-    } else {
-      buffered_size += last_.Size();
-      last_is_discardable_ = false;
-    }
-  }
-  return buffered_size;
-}
-
-// override
-void BytecodePeepholeOptimizer::FlushBasicBlock() {
-  if (LastIsValid()) {
-    next_stage_->Write(&last_);
-    InvalidateLast();
-  }
-  next_stage_->FlushBasicBlock();
-}
-
-// override
-void BytecodePeepholeOptimizer::Write(BytecodeNode* node) {
-  // Attempt optimization if there is an earlier node to optimize with.
-  if (LastIsValid()) {
-    node = Optimize(node);
-    // Only output the last node if it wasn't invalidated by the optimization.
-    if (LastIsValid()) {
-      next_stage_->Write(&last_);
-      InvalidateLast();
-    }
-  }
-
-  if (node != nullptr) {
-    SetLast(node);
-  }
 }
 
 Handle<Object> BytecodePeepholeOptimizer::GetConstantForIndexOperand(
@@ -94,22 +94,18 @@
            GetConstantForIndexOperand(&last_, 0)->IsName()));
 }
 
-void BytecodePeepholeOptimizer::UpdateCurrentBytecode(BytecodeNode* current) {
-  if (Bytecodes::IsJumpIfToBoolean(current->bytecode()) &&
-      Bytecodes::WritesBooleanToAccumulator(last_.bytecode())) {
-    // Conditional jumps with boolean conditions are emitted in
-    // ToBoolean form by the bytecode array builder,
-    // i.e. JumpIfToBooleanTrue rather JumpIfTrue. The ToBoolean element
-    // can be removed if the previous bytecode put a boolean value in
-    // the accumulator.
-    Bytecode jump = Bytecodes::GetJumpWithoutToBoolean(current->bytecode());
-    current->set_bytecode(jump, current->operand(0), current->operand_scale());
-  } else if (current->bytecode() == Bytecode::kToBooleanLogicalNot &&
-             Bytecodes::WritesBooleanToAccumulator(last_.bytecode())) {
-    // Logical-nots are emitted in ToBoolean form by the bytecode array
-    // builder, The ToBoolean element can be removed if the previous bytecode
-    // put a boolean value in the accumulator.
-    current->set_bytecode(Bytecode::kLogicalNot);
+void BytecodePeepholeOptimizer::TryToRemoveLastExpressionPosition(
+    const BytecodeNode* const current) {
+  if (current->source_info().is_valid() &&
+      last_.source_info().is_expression() &&
+      Bytecodes::IsWithoutExternalSideEffects(last_.bytecode())) {
+    // The last bytecode has been marked as expression. It has no
+    // external effects so can't throw and the current bytecode is a
+    // source position. Remove the expression position on the last
+    // bytecode to open up potential peephole optimizations and to
+    // save the memory and perf cost of storing the unneeded
+    // expression position.
+    last_.source_info().set_invalid();
   }
 }
 
@@ -134,15 +130,135 @@
   }
 }
 
+bool BytecodePeepholeOptimizer::CanElideLastBasedOnSourcePosition(
+    const BytecodeNode* const current) const {
+  //
+  // The rules for allowing the elision of the last bytecode based
+  // on source position are:
+  //
+  //                     C U R R E N T
+  //              +--------+--------+--------+
+  //              |  None  |  Expr  |  Stmt  |
+  //  L  +--------+--------+--------+--------+
+  //     |  None  |  YES   |  YES   |  YES   |
+  //  A  +--------+--------+--------+--------+
+  //     |  Expr  |  YES   | MAYBE  |  MAYBE |
+  //  S  +--------+--------+--------+--------+
+  //     |  Stmt  |  YES   |   NO   |   NO   |
+  //  T  +--------+--------+--------+--------+
+  //
+  // The goal is not to lose any statement positions and not lose useful
+  // expression positions. Whenever the last bytecode is elided its
+  // source position information is applied to the current node
+  // updating it if necessary.
+  //
+  // The last bytecode can be elided for the MAYBE cases if the last
+  // bytecode is known not to throw. If it throws, the system would
+  // not have correct stack trace information. The appropriate check
+  // for this would be Bytecodes::IsWithoutExternalSideEffects(),
+  // which is checked in
+  // BytecodePeepholeOptimizer::TransformLastAndCurrentBytecodes() to
+  // keep the check here simple.
+  //
+  // In rare cases, bytecode generation produces consecutive bytecodes
+  // with the same expression positions. In principle, the latter of
+  // these can be elided, but would make this function more expensive.
+  //
+  return (!last_.source_info().is_valid() ||
+          !current->source_info().is_valid());
+}
+
+namespace {
+
+void TransformLdaStarToLdrLdar(Bytecode new_bytecode, BytecodeNode* const last,
+                               BytecodeNode* const current) {
+  DCHECK_EQ(current->bytecode(), Bytecode::kStar);
+
+  //
+  // An example transformation here would be:
+  //
+  //   LdaGlobal i0, i1  ____\  LdrGlobal i0, i1, R
+  //   Star R            ====/  Ldar R
+  //
+  // which loads a global value into both a register and the
+  // accumulator. However, in the second form the Ldar can often be
+  // peephole optimized away unlike the Star in the first form.
+  //
+  last->Transform(new_bytecode, current->operand(0));
+  current->set_bytecode(Bytecode::kLdar, current->operand(0));
+}
+
+}  // namespace
+
+bool BytecodePeepholeOptimizer::TransformLastAndCurrentBytecodes(
+    BytecodeNode* const current) {
+  if (current->bytecode() == Bytecode::kStar &&
+      !current->source_info().is_statement()) {
+    // Note: If the Star is tagged with a statement position, we can't
+    // perform this transform as the store to the register will
+    // have the wrong ordering for stepping in the debugger.
+    switch (last_.bytecode()) {
+      case Bytecode::kLdaNamedProperty:
+        TransformLdaStarToLdrLdar(Bytecode::kLdrNamedProperty, &last_, current);
+        return true;
+      case Bytecode::kLdaKeyedProperty:
+        TransformLdaStarToLdrLdar(Bytecode::kLdrKeyedProperty, &last_, current);
+        return true;
+      case Bytecode::kLdaGlobal:
+        TransformLdaStarToLdrLdar(Bytecode::kLdrGlobal, &last_, current);
+        return true;
+      case Bytecode::kLdaContextSlot:
+        TransformLdaStarToLdrLdar(Bytecode::kLdrContextSlot, &last_, current);
+        return true;
+      case Bytecode::kLdaUndefined:
+        TransformLdaStarToLdrLdar(Bytecode::kLdrUndefined, &last_, current);
+        return true;
+      default:
+        break;
+    }
+  }
+  return false;
+}
+
+bool BytecodePeepholeOptimizer::RemoveToBooleanFromJump(
+    BytecodeNode* const current) {
+  bool can_remove = Bytecodes::IsJumpIfToBoolean(current->bytecode()) &&
+                    Bytecodes::WritesBooleanToAccumulator(last_.bytecode());
+  if (can_remove) {
+    // Conditional jumps with boolean conditions are emitted in
+    // ToBoolean form by the bytecode array builder,
+    // i.e. JumpIfToBooleanTrue rather than JumpIfTrue. The ToBoolean
+    // element can be removed if the previous bytecode put a boolean
+    // value in the accumulator.
+    Bytecode jump = Bytecodes::GetJumpWithoutToBoolean(current->bytecode());
+    current->set_bytecode(jump, current->operand(0));
+  }
+  return can_remove;
+}
+
+bool BytecodePeepholeOptimizer::RemoveToBooleanFromLogicalNot(
+    BytecodeNode* const current) {
+  bool can_remove = current->bytecode() == Bytecode::kToBooleanLogicalNot &&
+                    Bytecodes::WritesBooleanToAccumulator(last_.bytecode());
+  if (can_remove) {
+    // Logical-nots are emitted in ToBoolean form by the bytecode array
+    // builder. The ToBoolean element can be removed if the previous bytecode
+    // put a boolean value in the accumulator.
+    current->set_bytecode(Bytecode::kLogicalNot);
+  }
+  return can_remove;
+}
+
+bool BytecodePeepholeOptimizer::TransformCurrentBytecode(
+    BytecodeNode* const current) {
+  return RemoveToBooleanFromJump(current) ||
+         RemoveToBooleanFromLogicalNot(current);
+}
+
 bool BytecodePeepholeOptimizer::CanElideLast(
     const BytecodeNode* const current) const {
-  if (!last_is_discardable_) {
-    return false;
-  }
-
   if (last_.bytecode() == Bytecode::kNop) {
-    // Nop are placeholders for holding source position information
-    // and can be elided.
+    // Nops are placeholders for holding source position information.
     return true;
   } else if (Bytecodes::IsAccumulatorLoadWithoutEffects(current->bytecode()) &&
              Bytecodes::IsAccumulatorLoadWithoutEffects(last_.bytecode())) {
@@ -150,25 +266,58 @@
     // consecutive accumulator loads (that don't have side effects) then only
     // the final load is potentially visible.
     return true;
+  } else if (Bytecodes::GetAccumulatorUse(current->bytecode()) ==
+                 AccumulatorUse::kWrite &&
+             Bytecodes::IsAccumulatorLoadWithoutEffects(last_.bytecode())) {
+    // The current instruction clobbers the accumulator without reading it. The
+    // load in the last instruction can be elided as it has no effect.
+    return true;
   } else {
     return false;
   }
 }
 
 BytecodeNode* BytecodePeepholeOptimizer::Optimize(BytecodeNode* current) {
-  UpdateCurrentBytecode(current);
+  TryToRemoveLastExpressionPosition(current);
+
+  if (TransformCurrentBytecode(current) ||
+      TransformLastAndCurrentBytecodes(current)) {
+    return current;
+  }
 
   if (CanElideCurrent(current)) {
     if (current->source_info().is_valid()) {
+      // Preserve the source information by replacing the current bytecode
+      // with a no op bytecode.
       current->set_bytecode(Bytecode::kNop);
     } else {
       current = nullptr;
     }
-  } else if (CanElideLast(current)) {
+    return current;
+  }
+
+  if (CanElideLast(current) && CanElideLastBasedOnSourcePosition(current)) {
     if (last_.source_info().is_valid()) {
-      current->source_info().Update(last_.source_info());
+      // Current can not be valid per CanElideLastBasedOnSourcePosition().
+      current->source_info().Clone(last_.source_info());
     }
     InvalidateLast();
+    return current;
+  }
+
+  return current;
+}
+
+BytecodeNode* BytecodePeepholeOptimizer::OptimizeAndEmitLast(
+    BytecodeNode* current) {
+  // Attempt optimization if there is an earlier node to optimize with.
+  if (LastIsValid()) {
+    current = Optimize(current);
+    // Only output the last node if it wasn't invalidated by the optimization.
+    if (LastIsValid()) {
+      next_stage_->Write(&last_);
+      InvalidateLast();
+    }
   }
   return current;
 }
diff --git a/src/interpreter/bytecode-peephole-optimizer.h b/src/interpreter/bytecode-peephole-optimizer.h
index 1981395..e6ada2a 100644
--- a/src/interpreter/bytecode-peephole-optimizer.h
+++ b/src/interpreter/bytecode-peephole-optimizer.h
@@ -22,16 +22,31 @@
   BytecodePeepholeOptimizer(ConstantArrayBuilder* constant_array_builder,
                             BytecodePipelineStage* next_stage);
 
+  // BytecodePipelineStage interface.
   void Write(BytecodeNode* node) override;
-  size_t FlushForOffset() override;
-  void FlushBasicBlock() override;
+  void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
+  void BindLabel(BytecodeLabel* label) override;
+  void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
+  Handle<BytecodeArray> ToBytecodeArray(
+      int fixed_register_count, int parameter_count,
+      Handle<FixedArray> handler_table) override;
 
  private:
+  BytecodeNode* OptimizeAndEmitLast(BytecodeNode* current);
   BytecodeNode* Optimize(BytecodeNode* current);
+  void Flush();
 
-  void UpdateCurrentBytecode(BytecodeNode* const current);
+  void TryToRemoveLastExpressionPosition(const BytecodeNode* const current);
+  bool TransformCurrentBytecode(BytecodeNode* const current);
+  bool TransformLastAndCurrentBytecodes(BytecodeNode* const current);
   bool CanElideCurrent(const BytecodeNode* const current) const;
   bool CanElideLast(const BytecodeNode* const current) const;
+  bool CanElideLastBasedOnSourcePosition(
+      const BytecodeNode* const current) const;
+
+  // Simple substitution methods.
+  bool RemoveToBooleanFromJump(BytecodeNode* const current);
+  bool RemoveToBooleanFromLogicalNot(BytecodeNode* const current);
 
   void InvalidateLast();
   bool LastIsValid() const;
@@ -45,7 +60,6 @@
   ConstantArrayBuilder* constant_array_builder_;
   BytecodePipelineStage* next_stage_;
   BytecodeNode last_;
-  bool last_is_discardable_;
 
   DISALLOW_COPY_AND_ASSIGN(BytecodePeepholeOptimizer);
 };
diff --git a/src/interpreter/bytecode-pipeline.cc b/src/interpreter/bytecode-pipeline.cc
index 7bfb815..58ade92 100644
--- a/src/interpreter/bytecode-pipeline.cc
+++ b/src/interpreter/bytecode-pipeline.cc
@@ -11,104 +11,74 @@
 namespace internal {
 namespace interpreter {
 
-void BytecodeSourceInfo::Update(const BytecodeSourceInfo& entry) {
-  DCHECK(entry.is_valid());
-  if (!is_valid() || (entry.is_statement() && !is_statement()) ||
-      (entry.is_statement() && is_statement() &&
-       entry.source_position() > source_position())) {
-    // Position is updated if any of the following conditions are met:
-    //   (1) there is no existing position.
-    //   (2) the incoming position is a statement and the current position
-    //       is an expression.
-    //   (3) the existing position is a statement and the incoming
-    //       statement has a later source position.
-    // Condition 3 is needed for the first statement in a function which
-    // may end up with later statement positions being added during bytecode
-    // generation.
-    source_position_ = entry.source_position_;
-    is_statement_ = entry.is_statement_;
-  }
-}
-
 BytecodeNode::BytecodeNode(Bytecode bytecode) {
   DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
   bytecode_ = bytecode;
-  operand_scale_ = OperandScale::kSingle;
 }
 
-BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
-                           OperandScale operand_scale) {
+BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0) {
   DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
   bytecode_ = bytecode;
   operands_[0] = operand0;
-  operand_scale_ = operand_scale;
 }
 
 BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
-                           uint32_t operand1, OperandScale operand_scale) {
+                           uint32_t operand1) {
   DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
   bytecode_ = bytecode;
   operands_[0] = operand0;
   operands_[1] = operand1;
-  operand_scale_ = operand_scale;
 }
 
 BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
-                           uint32_t operand1, uint32_t operand2,
-                           OperandScale operand_scale) {
+                           uint32_t operand1, uint32_t operand2) {
   DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
   bytecode_ = bytecode;
   operands_[0] = operand0;
   operands_[1] = operand1;
   operands_[2] = operand2;
-  operand_scale_ = operand_scale;
 }
 
 BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
                            uint32_t operand1, uint32_t operand2,
-                           uint32_t operand3, OperandScale operand_scale) {
+                           uint32_t operand3) {
   DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 4);
   bytecode_ = bytecode;
   operands_[0] = operand0;
   operands_[1] = operand1;
   operands_[2] = operand2;
   operands_[3] = operand3;
-  operand_scale_ = operand_scale;
+}
+
+BytecodeNode::BytecodeNode(const BytecodeNode& other) {
+  memcpy(this, &other, sizeof(other));
+}
+
+BytecodeNode& BytecodeNode::operator=(const BytecodeNode& other) {
+  memcpy(this, &other, sizeof(other));
+  return *this;
 }
 
 void BytecodeNode::set_bytecode(Bytecode bytecode) {
   DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
   bytecode_ = bytecode;
-  operand_scale_ = OperandScale::kSingle;
 }
 
-void BytecodeNode::set_bytecode(Bytecode bytecode, uint32_t operand0,
-                                OperandScale operand_scale) {
+void BytecodeNode::set_bytecode(Bytecode bytecode, uint32_t operand0) {
   DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
   bytecode_ = bytecode;
   operands_[0] = operand0;
-  operand_scale_ = operand_scale;
 }
 
-size_t BytecodeNode::Size() const {
-  size_t size = Bytecodes::Size(bytecode_, operand_scale_);
-  if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale_)) {
-    size += 1;
-  }
-  return size;
+void BytecodeNode::Clone(const BytecodeNode* const other) {
+  memcpy(this, other, sizeof(*other));
 }
 
 void BytecodeNode::Print(std::ostream& os) const {
 #ifdef DEBUG
   std::ios saved_state(nullptr);
   saved_state.copyfmt(os);
-
   os << Bytecodes::ToString(bytecode_);
-  if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale_)) {
-    Bytecode scale_prefix =
-        Bytecodes::OperandScaleToPrefixBytecode(operand_scale_);
-    os << '.' << Bytecodes::ToString(scale_prefix);
-  }
 
   for (int i = 0; i < operand_count(); ++i) {
     os << ' ' << std::setw(8) << std::setfill('0') << std::hex << operands_[i];
@@ -116,7 +86,7 @@
   os.copyfmt(saved_state);
 
   if (source_info_.is_valid()) {
-    os << source_info_;
+    os << ' ' << source_info_;
   }
   os << '\n';
 #else
@@ -124,8 +94,21 @@
 #endif  // DEBUG
 }
 
-void BytecodeNode::Clone(const BytecodeNode* const other) {
-  memcpy(this, other, sizeof(*other));
+void BytecodeNode::Transform(Bytecode new_bytecode, uint32_t extra_operand) {
+  DCHECK_EQ(Bytecodes::NumberOfOperands(new_bytecode),
+            Bytecodes::NumberOfOperands(bytecode()) + 1);
+  DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 1 ||
+         Bytecodes::GetOperandType(new_bytecode, 0) ==
+             Bytecodes::GetOperandType(bytecode(), 0));
+  DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 2 ||
+         Bytecodes::GetOperandType(new_bytecode, 1) ==
+             Bytecodes::GetOperandType(bytecode(), 1));
+  DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 3 ||
+         Bytecodes::GetOperandType(new_bytecode, 2) ==
+             Bytecodes::GetOperandType(bytecode(), 2));
+  DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 4);
+  operands_[operand_count()] = extra_operand;
+  bytecode_ = new_bytecode;
 }
 
 bool BytecodeNode::operator==(const BytecodeNode& other) const {
@@ -144,11 +127,6 @@
   return true;
 }
 
-std::ostream& operator<<(std::ostream& os, const BytecodeNode& node) {
-  node.Print(os);
-  return os;
-}
-
 std::ostream& operator<<(std::ostream& os, const BytecodeSourceInfo& info) {
   if (info.is_valid()) {
     char description = info.is_statement() ? 'S' : 'E';
@@ -157,6 +135,11 @@
   return os;
 }
 
+std::ostream& operator<<(std::ostream& os, const BytecodeNode& node) {
+  node.Print(os);
+  return os;
+}
+
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/bytecode-pipeline.h b/src/interpreter/bytecode-pipeline.h
index ade712c..e2beff2 100644
--- a/src/interpreter/bytecode-pipeline.h
+++ b/src/interpreter/bytecode-pipeline.h
@@ -13,6 +13,7 @@
 namespace internal {
 namespace interpreter {
 
+class BytecodeLabel;
 class BytecodeNode;
 class BytecodeSourceInfo;
 
@@ -26,12 +27,26 @@
   // deferring Write() to the next stage.
   virtual void Write(BytecodeNode* node) = 0;
 
-  // Flush state for bytecode array offset calculation. Returns the
-  // current size of bytecode array.
-  virtual size_t FlushForOffset() = 0;
+  // Write jump bytecode node |node| which jumps to |label| into pipeline.
+  // The node and label are only valid for the duration of the call. This call
+  // implicitly ends the current basic block so should always write to the next
+  // stage.
+  virtual void WriteJump(BytecodeNode* node, BytecodeLabel* label) = 0;
 
-  // Flush state to terminate basic block.
-  virtual void FlushBasicBlock() = 0;
+  // Binds |label| to the current bytecode location. This call implicitly
+  // ends the current basic block and so any deferred bytecodes should be
+  // written to the next stage.
+  virtual void BindLabel(BytecodeLabel* label) = 0;
+
+  // Binds |label| to the location of |target|. This call implicitly
+  // ends the current basic block and so any deferred bytecodes should be
+  // written to the next stage.
+  virtual void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) = 0;
+
+  // Flush the pipeline and generate a bytecode array.
+  virtual Handle<BytecodeArray> ToBytecodeArray(
+      int fixed_register_count, int parameter_count,
+      Handle<FixedArray> handler_table) = 0;
 };
 
 // Source code position information.
@@ -39,35 +54,84 @@
  public:
   static const int kUninitializedPosition = -1;
 
-  BytecodeSourceInfo(int position = kUninitializedPosition,
-                     bool is_statement = false)
-      : source_position_(position), is_statement_(is_statement) {}
+  BytecodeSourceInfo()
+      : position_type_(PositionType::kNone),
+        source_position_(kUninitializedPosition) {}
 
-  // Combine later source info with current.
-  void Update(const BytecodeSourceInfo& entry);
+  BytecodeSourceInfo(int source_position, bool is_statement)
+      : position_type_(is_statement ? PositionType::kStatement
+                                    : PositionType::kExpression),
+        source_position_(source_position) {
+    DCHECK_GE(source_position, 0);
+  }
+
+  // Makes instance into a statement position.
+  void MakeStatementPosition(int source_position) {
+    // Statement positions can be replaced by other statement
+    // positions. For example, "for (x = 0; x < 3; ++x) 7;" has a
+    // statement position associated with 7 but no bytecode associated
+    // with it. Then Next is emitted after the body and has
+    // statement position and overrides the existing one.
+    position_type_ = PositionType::kStatement;
+    source_position_ = source_position;
+  }
+
+  // Makes instance into an expression position. Instance should not
+  // be a statement position otherwise it could be lost and impair the
+  // debugging experience.
+  void MakeExpressionPosition(int source_position) {
+    DCHECK(!is_statement());
+    position_type_ = PositionType::kExpression;
+    source_position_ = source_position;
+  }
+
+  // Forces an instance into an expression position.
+  void ForceExpressionPosition(int source_position) {
+    position_type_ = PositionType::kExpression;
+    source_position_ = source_position;
+  }
+
+  // Clones a source position. The current instance is expected to be
+  // invalid.
+  void Clone(const BytecodeSourceInfo& other) {
+    DCHECK(!is_valid());
+    position_type_ = other.position_type_;
+    source_position_ = other.source_position_;
+  }
 
   int source_position() const {
     DCHECK(is_valid());
     return source_position_;
   }
 
-  bool is_statement() const { return is_valid() && is_statement_; }
+  bool is_statement() const {
+    return position_type_ == PositionType::kStatement;
+  }
+  bool is_expression() const {
+    return position_type_ == PositionType::kExpression;
+  }
 
-  bool is_valid() const { return source_position_ != kUninitializedPosition; }
-  void set_invalid() { source_position_ = kUninitializedPosition; }
+  bool is_valid() const { return position_type_ != PositionType::kNone; }
+  void set_invalid() {
+    position_type_ = PositionType::kNone;
+    source_position_ = kUninitializedPosition;
+  }
 
   bool operator==(const BytecodeSourceInfo& other) const {
-    return source_position_ == other.source_position_ &&
-           is_statement_ == other.is_statement_;
+    return position_type_ == other.position_type_ &&
+           source_position_ == other.source_position_;
   }
+
   bool operator!=(const BytecodeSourceInfo& other) const {
-    return source_position_ != other.source_position_ ||
-           is_statement_ != other.is_statement_;
+    return position_type_ != other.position_type_ ||
+           source_position_ != other.source_position_;
   }
 
  private:
+  enum class PositionType : uint8_t { kNone, kExpression, kStatement };
+
+  PositionType position_type_;
   int source_position_;
-  bool is_statement_;
 
   DISALLOW_COPY_AND_ASSIGN(BytecodeSourceInfo);
 };
@@ -77,19 +141,18 @@
 class BytecodeNode final : ZoneObject {
  public:
   explicit BytecodeNode(Bytecode bytecode = Bytecode::kIllegal);
-  BytecodeNode(Bytecode bytecode, uint32_t operand0,
-               OperandScale operand_scale);
+  BytecodeNode(Bytecode bytecode, uint32_t operand0);
+  BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1);
   BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
-               OperandScale operand_scale);
+               uint32_t operand2);
   BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
-               uint32_t operand2, OperandScale operand_scale);
-  BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
-               uint32_t operand2, uint32_t operand3,
-               OperandScale operand_scale);
+               uint32_t operand2, uint32_t operand3);
+
+  BytecodeNode(const BytecodeNode& other);
+  BytecodeNode& operator=(const BytecodeNode& other);
 
   void set_bytecode(Bytecode bytecode);
-  void set_bytecode(Bytecode bytecode, uint32_t operand0,
-                    OperandScale operand_scale);
+  void set_bytecode(Bytecode bytecode, uint32_t operand0);
 
   // Clone |other|.
   void Clone(const BytecodeNode* const other);
@@ -97,8 +160,9 @@
   // Print to stream |os|.
   void Print(std::ostream& os) const;
 
-  // Return the size when this node is serialized to a bytecode array.
-  size_t Size() const;
+  // Transform to a node representing |new_bytecode| which has one
+  // operand more than the current bytecode.
+  void Transform(Bytecode new_bytecode, uint32_t extra_operand);
 
   Bytecode bytecode() const { return bytecode_; }
 
@@ -110,7 +174,6 @@
   const uint32_t* operands() const { return operands_; }
 
   int operand_count() const { return Bytecodes::NumberOfOperands(bytecode_); }
-  OperandScale operand_scale() const { return operand_scale_; }
 
   const BytecodeSourceInfo& source_info() const { return source_info_; }
   BytecodeSourceInfo& source_info() { return source_info_; }
@@ -124,7 +187,6 @@
 
   Bytecode bytecode_;
   uint32_t operands_[kMaxOperands];
-  OperandScale operand_scale_;
   BytecodeSourceInfo source_info_;
 };
 
diff --git a/src/interpreter/bytecode-register-allocator.cc b/src/interpreter/bytecode-register-allocator.cc
index 9bdde9a..10afcdc 100644
--- a/src/interpreter/bytecode-register-allocator.cc
+++ b/src/interpreter/bytecode-register-allocator.cc
@@ -14,7 +14,8 @@
                                                        int allocation_base)
     : free_temporaries_(zone),
       allocation_base_(allocation_base),
-      allocation_count_(0) {}
+      allocation_count_(0),
+      observer_(nullptr) {}
 
 Register TemporaryRegisterAllocator::first_temporary_register() const {
   DCHECK(allocation_count() > 0);
@@ -26,6 +27,12 @@
   return Register(allocation_base() + allocation_count() - 1);
 }
 
+void TemporaryRegisterAllocator::set_observer(
+    TemporaryRegisterObserver* observer) {
+  DCHECK(observer_ == nullptr);
+  observer_ = observer;
+}
+
 int TemporaryRegisterAllocator::AllocateTemporaryRegister() {
   allocation_count_ += 1;
   return allocation_base() + allocation_count() - 1;
@@ -140,6 +147,9 @@
 void TemporaryRegisterAllocator::ReturnTemporaryRegister(int reg_index) {
   DCHECK(free_temporaries_.find(reg_index) == free_temporaries_.end());
   free_temporaries_.insert(reg_index);
+  if (observer_) {
+    observer_->TemporaryRegisterFreeEvent(Register(reg_index));
+  }
 }
 
 BytecodeRegisterAllocator::BytecodeRegisterAllocator(
@@ -156,7 +166,6 @@
   allocated_.clear();
 }
 
-
 Register BytecodeRegisterAllocator::NewRegister() {
   int allocated = -1;
   if (next_consecutive_count_ <= 0) {
@@ -170,7 +179,6 @@
   return Register(allocated);
 }
 
-
 bool BytecodeRegisterAllocator::RegisterIsAllocatedInThisScope(
     Register reg) const {
   for (auto i = allocated_.begin(); i != allocated_.end(); i++) {
@@ -179,7 +187,6 @@
   return false;
 }
 
-
 void BytecodeRegisterAllocator::PrepareForConsecutiveAllocations(size_t count) {
   if (static_cast<int>(count) > next_consecutive_count_) {
     next_consecutive_register_ =
@@ -188,7 +195,6 @@
   }
 }
 
-
 Register BytecodeRegisterAllocator::NextConsecutiveRegister() {
   DCHECK_GE(next_consecutive_register_, 0);
   DCHECK_GT(next_consecutive_count_, 0);
diff --git a/src/interpreter/bytecode-register-allocator.h b/src/interpreter/bytecode-register-allocator.h
index a4f6845..b8f737b 100644
--- a/src/interpreter/bytecode-register-allocator.h
+++ b/src/interpreter/bytecode-register-allocator.h
@@ -14,6 +14,7 @@
 
 class BytecodeArrayBuilder;
 class Register;
+class TemporaryRegisterObserver;
 
 class TemporaryRegisterAllocator final {
  public:
@@ -54,6 +55,9 @@
   // Returns the number of temporary register allocations made.
   int allocation_count() const { return allocation_count_; }
 
+  // Sets an observer for temporary register events.
+  void set_observer(TemporaryRegisterObserver* observer);
+
  private:
   // Allocate a temporary register.
   int AllocateTemporaryRegister();
@@ -61,10 +65,17 @@
   ZoneSet<int> free_temporaries_;
   int allocation_base_;
   int allocation_count_;
+  TemporaryRegisterObserver* observer_;
 
   DISALLOW_COPY_AND_ASSIGN(TemporaryRegisterAllocator);
 };
 
+class TemporaryRegisterObserver {
+ public:
+  virtual ~TemporaryRegisterObserver() {}
+  virtual void TemporaryRegisterFreeEvent(Register reg) = 0;
+};
+
 // A class that allows the instantiator to allocate temporary registers that are
 // cleaned up when scope is closed.
 class BytecodeRegisterAllocator final {
diff --git a/src/interpreter/bytecode-register-optimizer.cc b/src/interpreter/bytecode-register-optimizer.cc
new file mode 100644
index 0000000..ab25f95
--- /dev/null
+++ b/src/interpreter/bytecode-register-optimizer.cc
@@ -0,0 +1,630 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-register-optimizer.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+const uint32_t BytecodeRegisterOptimizer::kInvalidEquivalenceId;
+
+// A class for tracking the state of a register. This class tracks
+// which equivalence set a register is a member of and also whether a
+// register is materialized in the bytecode stream.
+class BytecodeRegisterOptimizer::RegisterInfo final : public ZoneObject {
+ public:
+  RegisterInfo(Register reg, uint32_t equivalence_id, bool materialized)
+      : register_(reg),
+        equivalence_id_(equivalence_id),
+        materialized_(materialized),
+        next_(this),
+        prev_(this) {}
+
+  void AddToEquivalenceSetOf(RegisterInfo* info);
+  void MoveToNewEquivalenceSet(uint32_t equivalence_id, bool materialized);
+  bool IsOnlyMemberOfEquivalenceSet() const;
+  bool IsOnlyMaterializedMemberOfEquivalenceSet() const;
+  bool IsInSameEquivalenceSet(RegisterInfo* info) const;
+
+  // Get a member of this register's equivalence set that is
+  // materialized. The materialized equivalent will be this register
+  // if it is materialized. Returns nullptr if no materialized
+  // equivalent exists.
+  RegisterInfo* GetMaterializedEquivalent();
+
+  // Get a member of this register's equivalence set that is
+  // materialized and not register |reg|. The materialized equivalent
+  // will be this register if it is materialized. Returns nullptr if
+  // no materialized equivalent exists.
+  RegisterInfo* GetMaterializedEquivalentOtherThan(Register reg);
+
+  // Get a member of this register's equivalence set that is intended
+  // to be materialized in place of this register (which is currently
+  // materialized). The best candidate is deemed to be the register
+  // with the lowest index as this permits temporary registers to be
+  // removed from the bytecode stream. Returns nullptr if no candidate
+  // exists.
+  RegisterInfo* GetEquivalentToMaterialize();
+
+  // Get an equivalent register. Returns this if none exists.
+  RegisterInfo* GetEquivalent();
+
+  Register register_value() const { return register_; }
+  bool materialized() const { return materialized_; }
+  void set_materialized(bool materialized) { materialized_ = materialized; }
+  void set_equivalence_id(uint32_t equivalence_id) {
+    equivalence_id_ = equivalence_id;
+  }
+  uint32_t equivalence_id() const { return equivalence_id_; }
+
+ private:
+  Register register_;
+  uint32_t equivalence_id_;
+  bool materialized_;
+
+  // Equivalence set pointers.
+  RegisterInfo* next_;
+  RegisterInfo* prev_;
+
+  DISALLOW_COPY_AND_ASSIGN(RegisterInfo);
+};
+
+void BytecodeRegisterOptimizer::RegisterInfo::AddToEquivalenceSetOf(
+    RegisterInfo* info) {
+  DCHECK_NE(kInvalidEquivalenceId, info->equivalence_id());
+  // Fix old list
+  next_->prev_ = prev_;
+  prev_->next_ = next_;
+  // Add to new list.
+  next_ = info->next_;
+  prev_ = info;
+  prev_->next_ = this;
+  next_->prev_ = this;
+  set_equivalence_id(info->equivalence_id());
+  set_materialized(false);
+}
+
+void BytecodeRegisterOptimizer::RegisterInfo::MoveToNewEquivalenceSet(
+    uint32_t equivalence_id, bool materialized) {
+  next_->prev_ = prev_;
+  prev_->next_ = next_;
+  next_ = prev_ = this;
+  equivalence_id_ = equivalence_id;
+  materialized_ = materialized;
+}
+
+bool BytecodeRegisterOptimizer::RegisterInfo::IsOnlyMemberOfEquivalenceSet()
+    const {
+  return this->next_ == this;
+}
+
+bool BytecodeRegisterOptimizer::RegisterInfo::
+    IsOnlyMaterializedMemberOfEquivalenceSet() const {
+  DCHECK(materialized());
+
+  const RegisterInfo* visitor = this->next_;
+  while (visitor != this) {
+    if (visitor->materialized()) {
+      return false;
+    }
+    visitor = visitor->next_;
+  }
+  return true;
+}
+
+bool BytecodeRegisterOptimizer::RegisterInfo::IsInSameEquivalenceSet(
+    RegisterInfo* info) const {
+  return equivalence_id() == info->equivalence_id();
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::RegisterInfo::GetMaterializedEquivalent() {
+  RegisterInfo* visitor = this;
+  do {
+    if (visitor->materialized()) {
+      return visitor;
+    }
+    visitor = visitor->next_;
+  } while (visitor != this);
+
+  return nullptr;
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::RegisterInfo::GetMaterializedEquivalentOtherThan(
+    Register reg) {
+  RegisterInfo* visitor = this;
+  do {
+    if (visitor->materialized() && visitor->register_value() != reg) {
+      return visitor;
+    }
+    visitor = visitor->next_;
+  } while (visitor != this);
+
+  return nullptr;
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::RegisterInfo::GetEquivalentToMaterialize() {
+  DCHECK(this->materialized());
+  RegisterInfo* visitor = this->next_;
+  RegisterInfo* best_info = nullptr;
+  while (visitor != this) {
+    if (visitor->materialized()) {
+      return nullptr;
+    }
+    if (best_info == nullptr ||
+        visitor->register_value() < best_info->register_value()) {
+      best_info = visitor;
+    }
+    visitor = visitor->next_;
+  }
+  return best_info;
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::RegisterInfo::GetEquivalent() {
+  return next_;
+}
+
+BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
+    Zone* zone, TemporaryRegisterAllocator* register_allocator,
+    int parameter_count, BytecodePipelineStage* next_stage)
+    : accumulator_(Register::virtual_accumulator()),
+      temporary_base_(register_allocator->allocation_base()),
+      register_info_table_(zone),
+      equivalence_id_(0),
+      next_stage_(next_stage),
+      flush_required_(false),
+      zone_(zone) {
+  register_allocator->set_observer(this);
+
+  // Calculate offset so register index values can be mapped into
+  // a vector of register metadata.
+  if (parameter_count != 0) {
+    register_info_table_offset_ =
+        -Register::FromParameterIndex(0, parameter_count).index();
+  } else {
+    // TODO(oth): This path shouldn't be necessary in bytecode generated
+    // from JavaScript, but a set of tests does not include the JS receiver.
+    register_info_table_offset_ = -accumulator_.index();
+  }
+
+  // Initialize register map for parameters, locals, and the
+  // accumulator.
+  register_info_table_.resize(register_info_table_offset_ +
+                              static_cast<size_t>(temporary_base_.index()));
+  for (size_t i = 0; i < register_info_table_.size(); ++i) {
+    register_info_table_[i] = new (zone) RegisterInfo(
+        RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), true);
+    DCHECK_EQ(register_info_table_[i]->register_value().index(),
+              RegisterFromRegisterInfoTableIndex(i).index());
+  }
+  accumulator_info_ = GetRegisterInfo(accumulator_);
+  DCHECK(accumulator_info_->register_value() == accumulator_);
+}
+
+// override
+Handle<BytecodeArray> BytecodeRegisterOptimizer::ToBytecodeArray(
+    int fixed_register_count, int parameter_count,
+    Handle<FixedArray> handler_table) {
+  FlushState();
+  return next_stage_->ToBytecodeArray(fixed_register_count, parameter_count,
+                                      handler_table);
+}
+
+// override
+void BytecodeRegisterOptimizer::Write(BytecodeNode* node) {
+  //
+  // Transfers with observable registers as the destination will be
+  // immediately materialized so the source position information will
+  // be ordered correctly.
+  //
+  // Transfers without observable destination registers will initially
+  // be emitted as Nop's with the source position. They may, or may
+  // not, be materialized by the optimizer. However, the source
+  // position is not lost and being attached to a Nop is fine as the
+  // destination register is not observable in the debugger.
+  //
+  switch (node->bytecode()) {
+    case Bytecode::kLdar: {
+      DoLdar(node);
+      return;
+    }
+    case Bytecode::kStar: {
+      DoStar(node);
+      return;
+    }
+    case Bytecode::kMov: {
+      DoMov(node);
+      return;
+    }
+    default:
+      break;
+  }
+
+  if (Bytecodes::IsJump(node->bytecode()) ||
+      node->bytecode() == Bytecode::kDebugger ||
+      node->bytecode() == Bytecode::kSuspendGenerator) {
+    // All state must be flushed before emitting
+    // - a jump (due to how bytecode offsets for jumps are evaluated),
+    // - a call to the debugger (as it can manipulate locals and parameters),
+    // - a generator suspend (as this involves saving all registers).
+    FlushState();
+  }
+
+  PrepareOperands(node);
+  WriteToNextStage(node);
+}
+
+// override
+void BytecodeRegisterOptimizer::WriteJump(BytecodeNode* node,
+                                          BytecodeLabel* label) {
+  FlushState();
+  next_stage_->WriteJump(node, label);
+}
+
+// override
+void BytecodeRegisterOptimizer::BindLabel(BytecodeLabel* label) {
+  FlushState();
+  next_stage_->BindLabel(label);
+}
+
+// override
+void BytecodeRegisterOptimizer::BindLabel(const BytecodeLabel& target,
+                                          BytecodeLabel* label) {
+  // There is no need to flush here, it will have been flushed when |target|
+  // was bound.
+  next_stage_->BindLabel(target, label);
+}
+
+void BytecodeRegisterOptimizer::FlushState() {
+  if (!flush_required_) {
+    return;
+  }
+
+  // Materialize all live registers and break equivalences.
+  size_t count = register_info_table_.size();
+  for (size_t i = 0; i < count; ++i) {
+    RegisterInfo* reg_info = register_info_table_[i];
+    if (reg_info->materialized()) {
+      // Walk equivalents of materialized registers, materializing
+      // each equivalent register as necessary and placing in their
+      // own equivalence set.
+      RegisterInfo* equivalent;
+      while ((equivalent = reg_info->GetEquivalent()) != reg_info) {
+        if (!equivalent->materialized()) {
+          OutputRegisterTransfer(reg_info, equivalent);
+        }
+        equivalent->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
+      }
+    }
+  }
+
+  flush_required_ = false;
+}
+
+void BytecodeRegisterOptimizer::WriteToNextStage(BytecodeNode* node) const {
+  next_stage_->Write(node);
+}
+
+void BytecodeRegisterOptimizer::WriteToNextStage(
+    BytecodeNode* node, const BytecodeSourceInfo& source_info) const {
+  if (source_info.is_valid()) {
+    node->source_info().Clone(source_info);
+  }
+  next_stage_->Write(node);
+}
+
+void BytecodeRegisterOptimizer::OutputRegisterTransfer(
+    RegisterInfo* input_info, RegisterInfo* output_info,
+    const BytecodeSourceInfo& source_info) {
+  Register input = input_info->register_value();
+  Register output = output_info->register_value();
+  DCHECK_NE(input.index(), output.index());
+
+  if (input == accumulator_) {
+    uint32_t operand = static_cast<uint32_t>(output.ToOperand());
+    BytecodeNode node(Bytecode::kStar, operand);
+    WriteToNextStage(&node, source_info);
+  } else if (output == accumulator_) {
+    uint32_t operand = static_cast<uint32_t>(input.ToOperand());
+    BytecodeNode node(Bytecode::kLdar, operand);
+    WriteToNextStage(&node, source_info);
+  } else {
+    uint32_t operand0 = static_cast<uint32_t>(input.ToOperand());
+    uint32_t operand1 = static_cast<uint32_t>(output.ToOperand());
+    BytecodeNode node(Bytecode::kMov, operand0, operand1);
+    WriteToNextStage(&node, source_info);
+  }
+  output_info->set_materialized(true);
+}
+
+void BytecodeRegisterOptimizer::CreateMaterializedEquivalent(
+    RegisterInfo* info) {
+  DCHECK(info->materialized());
+  RegisterInfo* unmaterialized = info->GetEquivalentToMaterialize();
+  if (unmaterialized) {
+    OutputRegisterTransfer(info, unmaterialized);
+  }
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::GetMaterializedEquivalent(RegisterInfo* info) {
+  return info->materialized() ? info : info->GetMaterializedEquivalent();
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::GetMaterializedEquivalentNotAccumulator(
+    RegisterInfo* info) {
+  if (info->materialized()) {
+    return info;
+  }
+
+  RegisterInfo* result = info->GetMaterializedEquivalentOtherThan(accumulator_);
+  if (result == nullptr) {
+    Materialize(info);
+    result = info;
+  }
+  DCHECK(result->register_value() != accumulator_);
+  return result;
+}
+
+void BytecodeRegisterOptimizer::Materialize(RegisterInfo* info) {
+  if (!info->materialized()) {
+    RegisterInfo* materialized = info->GetMaterializedEquivalent();
+    OutputRegisterTransfer(materialized, info);
+  }
+}
+
+void BytecodeRegisterOptimizer::AddToEquivalenceSet(
+    RegisterInfo* set_member, RegisterInfo* non_set_member) {
+  non_set_member->AddToEquivalenceSetOf(set_member);
+  // Flushing is only required when two or more registers are placed
+  // in the same equivalence set.
+  flush_required_ = true;
+}
+
+void BytecodeRegisterOptimizer::RegisterTransfer(
+    RegisterInfo* input_info, RegisterInfo* output_info,
+    const BytecodeSourceInfo& source_info) {
+  // Materialize an alternate in the equivalence set that
+  // |output_info| is leaving.
+  if (output_info->materialized()) {
+    CreateMaterializedEquivalent(output_info);
+  }
+
+  // Add |output_info| to new equivalence set.
+  if (!output_info->IsInSameEquivalenceSet(input_info)) {
+    AddToEquivalenceSet(input_info, output_info);
+  }
+
+  bool output_is_observable =
+      RegisterIsObservable(output_info->register_value());
+  if (output_is_observable) {
+    // Force store to be emitted when register is observable.
+    output_info->set_materialized(false);
+    RegisterInfo* materialized_info = input_info->GetMaterializedEquivalent();
+    OutputRegisterTransfer(materialized_info, output_info, source_info);
+  } else if (source_info.is_valid()) {
+    // Emit a placeholder nop to maintain source position info.
+    EmitNopForSourceInfo(source_info);
+  }
+}
+
+void BytecodeRegisterOptimizer::EmitNopForSourceInfo(
+    const BytecodeSourceInfo& source_info) const {
+  DCHECK(source_info.is_valid());
+  BytecodeNode nop(Bytecode::kNop);
+  nop.source_info().Clone(source_info);
+  WriteToNextStage(&nop);
+}
+
+void BytecodeRegisterOptimizer::DoLdar(const BytecodeNode* const node) {
+  Register input = GetRegisterInputOperand(
+      0, node->bytecode(), node->operands(), node->operand_count());
+  RegisterInfo* input_info = GetRegisterInfo(input);
+  RegisterTransfer(input_info, accumulator_info_, node->source_info());
+}
+
+void BytecodeRegisterOptimizer::DoMov(const BytecodeNode* const node) {
+  Register input = GetRegisterInputOperand(
+      0, node->bytecode(), node->operands(), node->operand_count());
+  RegisterInfo* input_info = GetRegisterInfo(input);
+  Register output = GetRegisterOutputOperand(
+      1, node->bytecode(), node->operands(), node->operand_count());
+  RegisterInfo* output_info = GetOrCreateRegisterInfo(output);
+  RegisterTransfer(input_info, output_info, node->source_info());
+}
+
+void BytecodeRegisterOptimizer::DoStar(const BytecodeNode* const node) {
+  Register output = GetRegisterOutputOperand(
+      0, node->bytecode(), node->operands(), node->operand_count());
+  RegisterInfo* output_info = GetOrCreateRegisterInfo(output);
+  RegisterTransfer(accumulator_info_, output_info, node->source_info());
+}
+
+void BytecodeRegisterOptimizer::PrepareRegisterOutputOperand(
+    RegisterInfo* reg_info) {
+  if (reg_info->materialized()) {
+    CreateMaterializedEquivalent(reg_info);
+  }
+  reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
+}
+
+void BytecodeRegisterOptimizer::PrepareRegisterRangeOutputOperand(
+    Register start, int count) {
+  for (int i = 0; i < count; ++i) {
+    Register reg(start.index() + i);
+    RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg);
+    PrepareRegisterOutputOperand(reg_info);
+  }
+}
+
+Register BytecodeRegisterOptimizer::GetEquivalentRegisterForInputOperand(
+    Register reg) {
+  // For a temporary register, RegInfo state may need to be created. For
+  // locals and parameters, the RegInfo state is created in the
+  // BytecodeRegisterOptimizer constructor.
+  RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg);
+  if (reg_info->materialized()) {
+    return reg;
+  } else {
+    RegisterInfo* equivalent_info =
+        GetMaterializedEquivalentNotAccumulator(reg_info);
+    return equivalent_info->register_value();
+  }
+}
+
+void BytecodeRegisterOptimizer::PrepareRegisterInputOperand(
+    BytecodeNode* const node, Register reg, int operand_index) {
+  Register equivalent = GetEquivalentRegisterForInputOperand(reg);
+  node->operands()[operand_index] =
+      static_cast<uint32_t>(equivalent.ToOperand());
+}
+
+void BytecodeRegisterOptimizer::PrepareRegisterRangeInputOperand(Register start,
+                                                                 int count) {
+  for (int i = 0; i < count; ++i) {
+    Register current(start.index() + i);
+    RegisterInfo* input_info = GetRegisterInfo(current);
+    Materialize(input_info);
+  }
+}
+
+void BytecodeRegisterOptimizer::PrepareRegisterOperands(
+    BytecodeNode* const node) {
+  //
+  // For each input operand, get a materialized equivalent if it is
+  // just a single register, otherwise materialize register range.
+  // Update operand_scale if necessary.
+  //
+  // For each output register about to be clobbered, materialize an
+  // equivalent if it exists. Put each register in its own equivalence set.
+  //
+  int register_operand_bitmap =
+      Bytecodes::GetRegisterOperandBitmap(node->bytecode());
+  const OperandType* operand_types =
+      Bytecodes::GetOperandTypes(node->bytecode());
+  uint32_t* operands = node->operands();
+  for (int i = 0; register_operand_bitmap != 0;
+       ++i, register_operand_bitmap >>= 1) {
+    if ((register_operand_bitmap & 1) == 0) {
+      continue;
+    }
+    OperandType operand_type = operand_types[i];
+    int count = 0;
+    if (operand_types[i + 1] == OperandType::kRegCount) {
+      count = static_cast<int>(operands[i + 1]);
+      if (count == 0) {
+        continue;
+      }
+    } else {
+      count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
+    }
+
+    Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
+    if (Bytecodes::IsRegisterInputOperandType(operand_type)) {
+      if (count == 1) {
+        PrepareRegisterInputOperand(node, reg, i);
+      } else if (count > 1) {
+        PrepareRegisterRangeInputOperand(reg, count);
+      }
+    } else if (Bytecodes::IsRegisterOutputOperandType(operand_type)) {
+      PrepareRegisterRangeOutputOperand(reg, count);
+    }
+  }
+}
+
+void BytecodeRegisterOptimizer::PrepareAccumulator(BytecodeNode* const node) {
+  // Materialize the accumulator if it is read by the bytecode. The
+  // accumulator is special and no other register can be materialized
+  // in its place.
+  if (Bytecodes::ReadsAccumulator(node->bytecode()) &&
+      !accumulator_info_->materialized()) {
+    Materialize(accumulator_info_);
+  }
+
+  // Materialize an equivalent to the accumulator if it will be
+  // clobbered when the bytecode is dispatched.
+  if (Bytecodes::WritesAccumulator(node->bytecode())) {
+    PrepareRegisterOutputOperand(accumulator_info_);
+  }
+}
+
+void BytecodeRegisterOptimizer::PrepareOperands(BytecodeNode* const node) {
+  PrepareAccumulator(node);
+  PrepareRegisterOperands(node);
+}
+
+// static
+Register BytecodeRegisterOptimizer::GetRegisterInputOperand(
+    int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
+  DCHECK_LT(index, operand_count);
+  DCHECK(Bytecodes::IsRegisterInputOperandType(
+      Bytecodes::GetOperandType(bytecode, index)));
+  return OperandToRegister(operands[index]);
+}
+
+// static
+Register BytecodeRegisterOptimizer::GetRegisterOutputOperand(
+    int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
+  DCHECK_LT(index, operand_count);
+  DCHECK(Bytecodes::IsRegisterOutputOperandType(
+      Bytecodes::GetOperandType(bytecode, index)));
+  return OperandToRegister(operands[index]);
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::GetRegisterInfo(Register reg) {
+  size_t index = GetRegisterInfoTableIndex(reg);
+  return (index < register_info_table_.size()) ? register_info_table_[index]
+                                               : nullptr;
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::GetOrCreateRegisterInfo(Register reg) {
+  size_t index = GetRegisterInfoTableIndex(reg);
+  return index < register_info_table_.size() ? register_info_table_[index]
+                                             : NewRegisterInfo(reg);
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::NewRegisterInfo(Register reg) {
+  size_t index = GetRegisterInfoTableIndex(reg);
+  DCHECK_GE(index, register_info_table_.size());
+  GrowRegisterMap(reg);
+  return register_info_table_[index];
+}
+
+void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) {
+  DCHECK(RegisterIsTemporary(reg));
+  size_t index = GetRegisterInfoTableIndex(reg);
+  DCHECK_GE(index, register_info_table_.size());
+  size_t new_size = index + 1;
+  size_t old_size = register_info_table_.size();
+  register_info_table_.resize(new_size);
+  for (size_t i = old_size; i < new_size; ++i) {
+    register_info_table_[i] = new (zone()) RegisterInfo(
+        RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), false);
+  }
+}
+
+void BytecodeRegisterOptimizer::TemporaryRegisterFreeEvent(Register reg) {
+  RegisterInfo* info = GetRegisterInfo(reg);
+  if (info != nullptr) {
+    // If register is materialized and part of equivalence set, make
+    // sure another member of the set holds the value before the
+    // temporary register is removed.
+    if (info->materialized()) {
+      CreateMaterializedEquivalent(info);
+    }
+    info->MoveToNewEquivalenceSet(kInvalidEquivalenceId, false);
+  }
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/bytecode-register-optimizer.h b/src/interpreter/bytecode-register-optimizer.h
new file mode 100644
index 0000000..4229610
--- /dev/null
+++ b/src/interpreter/bytecode-register-optimizer.h
@@ -0,0 +1,155 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_REGISTER_OPTIMIZER_H_
+#define V8_INTERPRETER_BYTECODE_REGISTER_OPTIMIZER_H_
+
+#include "src/interpreter/bytecode-pipeline.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// An optimization stage for eliminating unnecessary transfers between
+// registers. The bytecode generator uses temporary registers
+// liberally for correctness and convenience and this stage removes
+// transfers that are not required and preserves correctness.
+class BytecodeRegisterOptimizer final : public BytecodePipelineStage,
+                                        public TemporaryRegisterObserver,
+                                        public ZoneObject {
+ public:
+  BytecodeRegisterOptimizer(Zone* zone,
+                            TemporaryRegisterAllocator* register_allocator,
+                            int parameter_count,
+                            BytecodePipelineStage* next_stage);
+  virtual ~BytecodeRegisterOptimizer() {}
+
+  // BytecodePipelineStage interface.
+  void Write(BytecodeNode* node) override;
+  void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
+  void BindLabel(BytecodeLabel* label) override;
+  void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
+  Handle<BytecodeArray> ToBytecodeArray(
+      int fixed_register_count, int parameter_count,
+      Handle<FixedArray> handler_table) override;
+
+ private:
+  static const uint32_t kInvalidEquivalenceId = kMaxUInt32;
+
+  class RegisterInfo;
+
+  // TemporaryRegisterObserver interface.
+  void TemporaryRegisterFreeEvent(Register reg) override;
+
+  // Helpers for BytecodePipelineStage interface.
+  void FlushState();
+  void WriteToNextStage(BytecodeNode* node) const;
+  void WriteToNextStage(BytecodeNode* node,
+                        const BytecodeSourceInfo& output_info) const;
+
+  // Update internal state for register transfer from |input| to
+  // |output| using |source_info| as source position information if
+  // any bytecodes are emitted due to transfer.
+  void RegisterTransfer(RegisterInfo* input, RegisterInfo* output,
+                        const BytecodeSourceInfo& source_info);
+
+  // Emit a register transfer bytecode from |input| to |output|.
+  void OutputRegisterTransfer(
+      RegisterInfo* input, RegisterInfo* output,
+      const BytecodeSourceInfo& source_info = BytecodeSourceInfo());
+
+  // Emits a Nop to preserve source position information in the
+  // bytecode pipeline.
+  void EmitNopForSourceInfo(const BytecodeSourceInfo& source_info) const;
+
+  // Handlers for bytecode nodes for register to register transfers.
+  void DoLdar(const BytecodeNode* const node);
+  void DoMov(const BytecodeNode* const node);
+  void DoStar(const BytecodeNode* const node);
+
+  // Operand processing methods for bytecodes other than those
+  // performing register to register transfers.
+  void PrepareOperands(BytecodeNode* const node);
+  void PrepareAccumulator(BytecodeNode* const node);
+  void PrepareRegisterOperands(BytecodeNode* const node);
+
+  void PrepareRegisterOutputOperand(RegisterInfo* reg_info);
+  void PrepareRegisterRangeOutputOperand(Register start, int count);
+  void PrepareRegisterInputOperand(BytecodeNode* const node, Register reg,
+                                   int operand_index);
+  void PrepareRegisterRangeInputOperand(Register start, int count);
+
+  Register GetEquivalentRegisterForInputOperand(Register reg);
+
+  static Register GetRegisterInputOperand(int index, Bytecode bytecode,
+                                          const uint32_t* operands,
+                                          int operand_count);
+  static Register GetRegisterOutputOperand(int index, Bytecode bytecode,
+                                           const uint32_t* operands,
+                                           int operand_count);
+
+  void CreateMaterializedEquivalent(RegisterInfo* info);
+  RegisterInfo* GetMaterializedEquivalent(RegisterInfo* info);
+  RegisterInfo* GetMaterializedEquivalentNotAccumulator(RegisterInfo* info);
+  void Materialize(RegisterInfo* info);
+  void AddToEquivalenceSet(RegisterInfo* set_member,
+                           RegisterInfo* non_set_member);
+
+  // Methods for finding and creating metadata for each register.
+  RegisterInfo* GetOrCreateRegisterInfo(Register reg);
+  RegisterInfo* GetRegisterInfo(Register reg);
+  RegisterInfo* NewRegisterInfo(Register reg);
+  void GrowRegisterMap(Register reg);
+
+  bool RegisterIsTemporary(Register reg) const {
+    return reg >= temporary_base_;
+  }
+
+  bool RegisterIsObservable(Register reg) const {
+    return reg != accumulator_ && !RegisterIsTemporary(reg);
+  }
+
+  static Register OperandToRegister(uint32_t operand) {
+    return Register::FromOperand(static_cast<int32_t>(operand));
+  }
+
+  size_t GetRegisterInfoTableIndex(Register reg) const {
+    return static_cast<size_t>(reg.index() + register_info_table_offset_);
+  }
+
+  Register RegisterFromRegisterInfoTableIndex(size_t index) const {
+    return Register(static_cast<int>(index) - register_info_table_offset_);
+  }
+
+  uint32_t NextEquivalenceId() {
+    equivalence_id_++;
+    CHECK_NE(equivalence_id_, kInvalidEquivalenceId);
+    return equivalence_id_;
+  }
+
+  Zone* zone() { return zone_; }
+
+  const Register accumulator_;
+  RegisterInfo* accumulator_info_;
+  const Register temporary_base_;
+
+  // Direct mapping to register info.
+  ZoneVector<RegisterInfo*> register_info_table_;
+  int register_info_table_offset_;
+
+  // Counter for equivalence sets identifiers.
+  int equivalence_id_;
+
+  BytecodePipelineStage* next_stage_;
+  bool flush_required_;
+  Zone* zone_;
+
+  DISALLOW_COPY_AND_ASSIGN(BytecodeRegisterOptimizer);
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_BYTECODE_REGISTER_OPTIMIZER_H_
diff --git a/src/interpreter/bytecode-traits.h b/src/interpreter/bytecode-traits.h
index e7d1432..ea3d5d4 100644
--- a/src/interpreter/bytecode-traits.h
+++ b/src/interpreter/bytecode-traits.h
@@ -30,13 +30,15 @@
 
 template <OperandType>
 struct OperandTraits {
-  typedef OperandTypeInfoTraits<OperandTypeInfo::kNone> TypeInfo;
+  typedef OperandTypeInfoTraits<OperandTypeInfo::kNone> TypeInfoTraits;
+  static const OperandTypeInfo kOperandTypeInfo = OperandTypeInfo::kNone;
 };
 
-#define DECLARE_OPERAND_TYPE_TRAITS(Name, InfoType)   \
-  template <>                                         \
-  struct OperandTraits<OperandType::k##Name> {        \
-    typedef OperandTypeInfoTraits<InfoType> TypeInfo; \
+#define DECLARE_OPERAND_TYPE_TRAITS(Name, InfoType)           \
+  template <>                                                 \
+  struct OperandTraits<OperandType::k##Name> {                \
+    typedef OperandTypeInfoTraits<InfoType> TypeInfoTraits;   \
+    static const OperandTypeInfo kOperandTypeInfo = InfoType; \
   };
 OPERAND_TYPE_LIST(DECLARE_OPERAND_TYPE_TRAITS)
 #undef DECLARE_OPERAND_TYPE_TRAITS
@@ -57,8 +59,8 @@
   };
 
   static const int kSize =
-      Helper<OperandTraits<operand_type>::TypeInfo::kIsScalable,
-             OperandTraits<operand_type>::TypeInfo::kUnscaledSize,
+      Helper<OperandTraits<operand_type>::TypeInfoTraits::kIsScalable,
+             OperandTraits<operand_type>::TypeInfoTraits::kUnscaledSize,
              operand_scale>::kSize;
   static const OperandSize kOperandSize = static_cast<OperandSize>(kSize);
 };
@@ -89,7 +91,16 @@
     return operand_types;
   }
 
-  static OperandSize GetOperandSize(int i, OperandScale operand_scale) {
+  static const OperandTypeInfo* GetOperandTypeInfos() {
+    static const OperandTypeInfo operand_type_infos[] = {
+        OperandTraits<operand_0>::kOperandTypeInfo,
+        OperandTraits<operand_1>::kOperandTypeInfo,
+        OperandTraits<operand_2>::kOperandTypeInfo,
+        OperandTraits<operand_3>::kOperandTypeInfo, OperandTypeInfo::kNone};
+    return operand_type_infos;
+  }
+
+  static const OperandSize* GetOperandSizes(OperandScale operand_scale) {
     switch (operand_scale) {
 #define CASE(Name, _)                                                  \
   case OperandScale::k##Name: {                                        \
@@ -99,14 +110,13 @@
         OperandScaler<operand_2, OperandScale::k##Name>::kOperandSize, \
         OperandScaler<operand_3, OperandScale::k##Name>::kOperandSize, \
     };                                                                 \
-    DCHECK_LT(static_cast<size_t>(i), arraysize(kOperandSizes));       \
-    return kOperandSizes[i];                                           \
+    return kOperandSizes;                                              \
   }
       OPERAND_SCALE_LIST(CASE)
 #undef CASE
     }
     UNREACHABLE();
-    return OperandSize::kNone;
+    return nullptr;
   }
 
   template <OperandType ot>
@@ -116,10 +126,10 @@
   }
 
   static inline bool IsScalable() {
-    return (OperandTraits<operand_0>::TypeInfo::kIsScalable |
-            OperandTraits<operand_1>::TypeInfo::kIsScalable |
-            OperandTraits<operand_2>::TypeInfo::kIsScalable |
-            OperandTraits<operand_3>::TypeInfo::kIsScalable);
+    return (OperandTraits<operand_0>::TypeInfoTraits::kIsScalable |
+            OperandTraits<operand_1>::TypeInfoTraits::kIsScalable |
+            OperandTraits<operand_2>::TypeInfoTraits::kIsScalable |
+            OperandTraits<operand_3>::TypeInfoTraits::kIsScalable);
   }
 
   static const AccumulatorUse kAccumulatorUse = accumulator_use;
@@ -145,7 +155,15 @@
     return operand_types;
   }
 
-  static OperandSize GetOperandSize(int i, OperandScale operand_scale) {
+  static const OperandTypeInfo* GetOperandTypeInfos() {
+    static const OperandTypeInfo operand_type_infos[] = {
+        OperandTraits<operand_0>::kOperandTypeInfo,
+        OperandTraits<operand_1>::kOperandTypeInfo,
+        OperandTraits<operand_2>::kOperandTypeInfo, OperandTypeInfo::kNone};
+    return operand_type_infos;
+  }
+
+  static const OperandSize* GetOperandSizes(OperandScale operand_scale) {
     switch (operand_scale) {
 #define CASE(Name, _)                                                  \
   case OperandScale::k##Name: {                                        \
@@ -154,14 +172,13 @@
         OperandScaler<operand_1, OperandScale::k##Name>::kOperandSize, \
         OperandScaler<operand_2, OperandScale::k##Name>::kOperandSize, \
     };                                                                 \
-    DCHECK_LT(static_cast<size_t>(i), arraysize(kOperandSizes));       \
-    return kOperandSizes[i];                                           \
+    return kOperandSizes;                                              \
   }
       OPERAND_SCALE_LIST(CASE)
 #undef CASE
     }
     UNREACHABLE();
-    return OperandSize::kNone;
+    return nullptr;
   }
 
   template <OperandType ot>
@@ -170,9 +187,9 @@
   }
 
   static inline bool IsScalable() {
-    return (OperandTraits<operand_0>::TypeInfo::kIsScalable |
-            OperandTraits<operand_1>::TypeInfo::kIsScalable |
-            OperandTraits<operand_2>::TypeInfo::kIsScalable);
+    return (OperandTraits<operand_0>::TypeInfoTraits::kIsScalable |
+            OperandTraits<operand_1>::TypeInfoTraits::kIsScalable |
+            OperandTraits<operand_2>::TypeInfoTraits::kIsScalable);
   }
 
   static const AccumulatorUse kAccumulatorUse = accumulator_use;
@@ -196,7 +213,14 @@
     return operand_types;
   }
 
-  static OperandSize GetOperandSize(int i, OperandScale operand_scale) {
+  static const OperandTypeInfo* GetOperandTypeInfos() {
+    static const OperandTypeInfo operand_type_infos[] = {
+        OperandTraits<operand_0>::kOperandTypeInfo,
+        OperandTraits<operand_1>::kOperandTypeInfo, OperandTypeInfo::kNone};
+    return operand_type_infos;
+  }
+
+  static const OperandSize* GetOperandSizes(OperandScale operand_scale) {
     switch (operand_scale) {
 #define CASE(Name, _)                                                  \
   case OperandScale::k##Name: {                                        \
@@ -204,14 +228,13 @@
         OperandScaler<operand_0, OperandScale::k##Name>::kOperandSize, \
         OperandScaler<operand_1, OperandScale::k##Name>::kOperandSize, \
     };                                                                 \
-    DCHECK_LT(static_cast<size_t>(i), arraysize(kOperandSizes));       \
-    return kOperandSizes[i];                                           \
+    return kOperandSizes;                                              \
   }
       OPERAND_SCALE_LIST(CASE)
 #undef CASE
     }
     UNREACHABLE();
-    return OperandSize::kNone;
+    return nullptr;
   }
 
   template <OperandType ot>
@@ -220,8 +243,8 @@
   }
 
   static inline bool IsScalable() {
-    return (OperandTraits<operand_0>::TypeInfo::kIsScalable |
-            OperandTraits<operand_1>::TypeInfo::kIsScalable);
+    return (OperandTraits<operand_0>::TypeInfoTraits::kIsScalable |
+            OperandTraits<operand_1>::TypeInfoTraits::kIsScalable);
   }
 
   static const AccumulatorUse kAccumulatorUse = accumulator_use;
@@ -241,21 +264,26 @@
     return operand_types;
   }
 
-  static OperandSize GetOperandSize(int i, OperandScale operand_scale) {
+  static const OperandTypeInfo* GetOperandTypeInfos() {
+    static const OperandTypeInfo operand_type_infos[] = {
+        OperandTraits<operand_0>::kOperandTypeInfo, OperandTypeInfo::kNone};
+    return operand_type_infos;
+  }
+
+  static const OperandSize* GetOperandSizes(OperandScale operand_scale) {
     switch (operand_scale) {
 #define CASE(Name, _)                                                  \
   case OperandScale::k##Name: {                                        \
     static const OperandSize kOperandSizes[] = {                       \
         OperandScaler<operand_0, OperandScale::k##Name>::kOperandSize, \
     };                                                                 \
-    DCHECK_LT(static_cast<size_t>(i), arraysize(kOperandSizes));       \
-    return kOperandSizes[i];                                           \
+    return kOperandSizes;                                              \
   }
       OPERAND_SCALE_LIST(CASE)
 #undef CASE
     }
     UNREACHABLE();
-    return OperandSize::kNone;
+    return nullptr;
   }
 
   template <OperandType ot>
@@ -264,7 +292,7 @@
   }
 
   static inline bool IsScalable() {
-    return OperandTraits<operand_0>::TypeInfo::kIsScalable;
+    return OperandTraits<operand_0>::TypeInfoTraits::kIsScalable;
   }
 
   static const AccumulatorUse kAccumulatorUse = accumulator_use;
@@ -282,9 +310,14 @@
     return operand_types;
   }
 
-  static OperandSize GetOperandSize(int i, OperandScale operand_scale) {
-    UNREACHABLE();
-    return OperandSize::kNone;
+  static const OperandTypeInfo* GetOperandTypeInfos() {
+    static const OperandTypeInfo operand_type_infos[] = {
+        OperandTypeInfo::kNone};
+    return operand_type_infos;
+  }
+
+  static const OperandSize* GetOperandSizes(OperandScale operand_scale) {
+    return nullptr;
   }
 
   template <OperandType ot>
diff --git a/src/interpreter/bytecodes.cc b/src/interpreter/bytecodes.cc
index 5a67847..44c5138 100644
--- a/src/interpreter/bytecodes.cc
+++ b/src/interpreter/bytecodes.cc
@@ -6,6 +6,7 @@
 
 #include <iomanip>
 
+#include "src/base/bits.h"
 #include "src/frames.h"
 #include "src/interpreter/bytecode-traits.h"
 #include "src/interpreter/interpreter.h"
@@ -100,14 +101,6 @@
   return "";
 }
 
-
-// static
-uint8_t Bytecodes::ToByte(Bytecode bytecode) {
-  DCHECK(bytecode <= Bytecode::kLast);
-  return static_cast<uint8_t>(bytecode);
-}
-
-
 // static
 Bytecode Bytecodes::FromByte(uint8_t value) {
   Bytecode bytecode = static_cast<Bytecode>(value);
@@ -115,7 +108,6 @@
   return bytecode;
 }
 
-
 // static
 Bytecode Bytecodes::GetDebugBreak(Bytecode bytecode) {
   DCHECK(!IsDebugBreak(bytecode));
@@ -148,7 +140,6 @@
   return size;
 }
 
-
 // static
 size_t Bytecodes::ReturnCount(Bytecode bytecode) {
   return bytecode == Bytecode::kReturn ? 1 : 0;
@@ -168,7 +159,6 @@
   return 0;
 }
 
-
 // static
 int Bytecodes::NumberOfRegisterOperands(Bytecode bytecode) {
   DCHECK(bytecode <= Bytecode::kLast);
@@ -285,6 +275,34 @@
 }
 
 // static
+bool Bytecodes::IsJumpWithoutEffects(Bytecode bytecode) {
+  return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode);
+}
+
+// static
+bool Bytecodes::IsRegisterLoadWithoutEffects(Bytecode bytecode) {
+  switch (bytecode) {
+    case Bytecode::kMov:
+    case Bytecode::kPopContext:
+    case Bytecode::kPushContext:
+    case Bytecode::kStar:
+    case Bytecode::kLdrUndefined:
+      return true;
+    default:
+      return false;
+  }
+}
+
+// static
+bool Bytecodes::IsWithoutExternalSideEffects(Bytecode bytecode) {
+  // These bytecodes only manipulate interpreter frame state and will
+  // never throw.
+  return (IsAccumulatorLoadWithoutEffects(bytecode) ||
+          IsRegisterLoadWithoutEffects(bytecode) ||
+          bytecode == Bytecode::kNop || IsJumpWithoutEffects(bytecode));
+}
+
+// static
 OperandType Bytecodes::GetOperandType(Bytecode bytecode, int i) {
   DCHECK_LE(bytecode, Bytecode::kLast);
   DCHECK_LT(i, NumberOfOperands(bytecode));
@@ -307,18 +325,39 @@
 }
 
 // static
-OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i,
-                                      OperandScale operand_scale) {
+const OperandTypeInfo* Bytecodes::GetOperandTypeInfos(Bytecode bytecode) {
   DCHECK(bytecode <= Bytecode::kLast);
   switch (bytecode) {
 #define CASE(Name, ...)   \
   case Bytecode::k##Name: \
-    return BytecodeTraits<__VA_ARGS__>::GetOperandSize(i, operand_scale);
+    return BytecodeTraits<__VA_ARGS__>::GetOperandTypeInfos();
     BYTECODE_LIST(CASE)
 #undef CASE
   }
   UNREACHABLE();
-  return OperandSize::kNone;
+  return nullptr;
+}
+
+// static
+OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i,
+                                      OperandScale operand_scale) {
+  DCHECK_LT(i, NumberOfOperands(bytecode));
+  return GetOperandSizes(bytecode, operand_scale)[i];
+}
+
+// static
+const OperandSize* Bytecodes::GetOperandSizes(Bytecode bytecode,
+                                              OperandScale operand_scale) {
+  DCHECK(bytecode <= Bytecode::kLast);
+  switch (bytecode) {
+#define CASE(Name, ...)   \
+  case Bytecode::k##Name: \
+    return BytecodeTraits<__VA_ARGS__>::GetOperandSizes(operand_scale);
+    BYTECODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return nullptr;
 }
 
 // static
@@ -574,7 +613,7 @@
   switch (operand_type) {
 #define CASE(Name, _)        \
   case OperandType::k##Name: \
-    return OperandTraits<OperandType::k##Name>::TypeInfo::kIsUnsigned;
+    return OperandTraits<OperandType::k##Name>::TypeInfoTraits::kIsUnsigned;
     OPERAND_TYPE_LIST(CASE)
 #undef CASE
   }
@@ -584,9 +623,9 @@
 
 // static
 OperandSize Bytecodes::SizeForSignedOperand(int value) {
-  if (kMinInt8 <= value && value <= kMaxInt8) {
+  if (value >= kMinInt8 && value <= kMaxInt8) {
     return OperandSize::kByte;
-  } else if (kMinInt16 <= value && value <= kMaxInt16) {
+  } else if (value >= kMinInt16 && value <= kMaxInt16) {
     return OperandSize::kShort;
   } else {
     return OperandSize::kQuad;
@@ -594,8 +633,7 @@
 }
 
 // static
-OperandSize Bytecodes::SizeForUnsignedOperand(int value) {
-  DCHECK_GE(value, 0);
+OperandSize Bytecodes::SizeForUnsignedOperand(uint32_t value) {
   if (value <= kMaxUInt8) {
     return OperandSize::kByte;
   } else if (value <= kMaxUInt16) {
@@ -605,42 +643,6 @@
   }
 }
 
-OperandSize Bytecodes::SizeForUnsignedOperand(size_t value) {
-  if (value <= static_cast<size_t>(kMaxUInt8)) {
-    return OperandSize::kByte;
-  } else if (value <= static_cast<size_t>(kMaxUInt16)) {
-    return OperandSize::kShort;
-  } else if (value <= kMaxUInt32) {
-    return OperandSize::kQuad;
-  } else {
-    UNREACHABLE();
-    return OperandSize::kQuad;
-  }
-}
-
-OperandScale Bytecodes::OperandSizesToScale(OperandSize size0,
-                                            OperandSize size1,
-                                            OperandSize size2,
-                                            OperandSize size3) {
-  OperandSize upper = std::max(size0, size1);
-  OperandSize lower = std::max(size2, size3);
-  OperandSize result = std::max(upper, lower);
-  // Operand sizes have been scaled before calling this function.
-  // Currently all scalable operands are byte sized at
-  // OperandScale::kSingle.
-  STATIC_ASSERT(static_cast<int>(OperandSize::kByte) ==
-                    static_cast<int>(OperandScale::kSingle) &&
-                static_cast<int>(OperandSize::kShort) ==
-                    static_cast<int>(OperandScale::kDouble) &&
-                static_cast<int>(OperandSize::kQuad) ==
-                    static_cast<int>(OperandScale::kQuadruple));
-  OperandScale operand_scale = static_cast<OperandScale>(result);
-  DCHECK(operand_scale == OperandScale::kSingle ||
-         operand_scale == OperandScale::kDouble ||
-         operand_scale == OperandScale::kQuadruple);
-  return operand_scale;
-}
-
 // static
 Register Bytecodes::DecodeRegisterOperand(const uint8_t* operand_start,
                                           OperandType operand_type,
@@ -735,6 +737,7 @@
         break;
       case interpreter::OperandType::kIdx:
       case interpreter::OperandType::kRuntimeId:
+      case interpreter::OperandType::kIntrinsicId:
         os << "["
            << DecodeUnsignedOperand(operand_start, op_type, operand_scale)
            << "]";
@@ -829,6 +832,10 @@
     (InterpreterFrameConstants::kRegisterFileFromFp -
      InterpreterFrameConstants::kBytecodeOffsetFromFp) /
     kPointerSize;
+static const int kCallerPCOffsetRegisterIndex =
+    (InterpreterFrameConstants::kRegisterFileFromFp -
+     InterpreterFrameConstants::kCallerPCOffsetFromFp) /
+    kPointerSize;
 
 Register Register::FromParameterIndex(int index, int parameter_count) {
   DCHECK_GE(index, 0);
@@ -881,6 +888,11 @@
   return index() == kBytecodeOffsetRegisterIndex;
 }
 
+// static
+Register Register::virtual_accumulator() {
+  return Register(kCallerPCOffsetRegisterIndex);
+}
+
 OperandSize Register::SizeOfOperand() const {
   int32_t operand = ToOperand();
   if (operand >= kMinInt8 && operand <= kMaxInt8) {
diff --git a/src/interpreter/bytecodes.h b/src/interpreter/bytecodes.h
index d67a390..63a69f1 100644
--- a/src/interpreter/bytecodes.h
+++ b/src/interpreter/bytecodes.h
@@ -30,6 +30,7 @@
 
 #define SCALAR_OPERAND_TYPE_LIST(V)                   \
   V(Flag8, OperandTypeInfo::kFixedUnsignedByte)       \
+  V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
   V(Idx, OperandTypeInfo::kScalableUnsignedByte)      \
   V(Imm, OperandTypeInfo::kScalableSignedByte)        \
   V(RegCount, OperandTypeInfo::kScalableUnsignedByte) \
@@ -73,188 +74,197 @@
   DEBUG_BREAK_PREFIX_BYTECODE_LIST(V)
 
 // The list of bytecodes which are interpreted by the interpreter.
-#define BYTECODE_LIST(V)                                                      \
-  /* Extended width operands */                                               \
-  V(Wide, AccumulatorUse::kNone)                                              \
-  V(ExtraWide, AccumulatorUse::kNone)                                         \
-                                                                              \
-  /* Loading the accumulator */                                               \
-  V(LdaZero, AccumulatorUse::kWrite)                                          \
-  V(LdaSmi, AccumulatorUse::kWrite, OperandType::kImm)                        \
-  V(LdaUndefined, AccumulatorUse::kWrite)                                     \
-  V(LdaNull, AccumulatorUse::kWrite)                                          \
-  V(LdaTheHole, AccumulatorUse::kWrite)                                       \
-  V(LdaTrue, AccumulatorUse::kWrite)                                          \
-  V(LdaFalse, AccumulatorUse::kWrite)                                         \
-  V(LdaConstant, AccumulatorUse::kWrite, OperandType::kIdx)                   \
-                                                                              \
-  /* Globals */                                                               \
-  V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx, OperandType::kIdx)  \
-  V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx,         \
-    OperandType::kIdx)                                                        \
-  V(StaGlobalSloppy, AccumulatorUse::kRead, OperandType::kIdx,                \
-    OperandType::kIdx)                                                        \
-  V(StaGlobalStrict, AccumulatorUse::kRead, OperandType::kIdx,                \
-    OperandType::kIdx)                                                        \
-                                                                              \
-  /* Context operations */                                                    \
-  V(PushContext, AccumulatorUse::kRead, OperandType::kRegOut)                 \
-  V(PopContext, AccumulatorUse::kNone, OperandType::kReg)                     \
-  V(LdaContextSlot, AccumulatorUse::kWrite, OperandType::kReg,                \
-    OperandType::kIdx)                                                        \
-  V(StaContextSlot, AccumulatorUse::kRead, OperandType::kReg,                 \
-    OperandType::kIdx)                                                        \
-                                                                              \
-  /* Load-Store lookup slots */                                               \
-  V(LdaLookupSlot, AccumulatorUse::kWrite, OperandType::kIdx)                 \
-  V(LdaLookupSlotInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx)     \
-  V(StaLookupSlotSloppy, AccumulatorUse::kReadWrite, OperandType::kIdx)       \
-  V(StaLookupSlotStrict, AccumulatorUse::kReadWrite, OperandType::kIdx)       \
-                                                                              \
-  /* Register-accumulator transfers */                                        \
-  V(Ldar, AccumulatorUse::kWrite, OperandType::kReg)                          \
-  V(Star, AccumulatorUse::kRead, OperandType::kRegOut)                        \
-                                                                              \
-  /* Register-register transfers */                                           \
-  V(Mov, AccumulatorUse::kNone, OperandType::kReg, OperandType::kRegOut)      \
-                                                                              \
-  /* LoadIC operations */                                                     \
-  V(LoadIC, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kIdx,     \
-    OperandType::kIdx)                                                        \
-  V(KeyedLoadIC, AccumulatorUse::kReadWrite, OperandType::kReg,               \
-    OperandType::kIdx)                                                        \
-                                                                              \
-  /* StoreIC operations */                                                    \
-  V(StoreICSloppy, AccumulatorUse::kRead, OperandType::kReg,                  \
-    OperandType::kIdx, OperandType::kIdx)                                     \
-  V(StoreICStrict, AccumulatorUse::kRead, OperandType::kReg,                  \
-    OperandType::kIdx, OperandType::kIdx)                                     \
-  V(KeyedStoreICSloppy, AccumulatorUse::kRead, OperandType::kReg,             \
-    OperandType::kReg, OperandType::kIdx)                                     \
-  V(KeyedStoreICStrict, AccumulatorUse::kRead, OperandType::kReg,             \
-    OperandType::kReg, OperandType::kIdx)                                     \
-                                                                              \
-  /* Binary Operators */                                                      \
-  V(Add, AccumulatorUse::kReadWrite, OperandType::kReg)                       \
-  V(Sub, AccumulatorUse::kReadWrite, OperandType::kReg)                       \
-  V(Mul, AccumulatorUse::kReadWrite, OperandType::kReg)                       \
-  V(Div, AccumulatorUse::kReadWrite, OperandType::kReg)                       \
-  V(Mod, AccumulatorUse::kReadWrite, OperandType::kReg)                       \
-  V(BitwiseOr, AccumulatorUse::kReadWrite, OperandType::kReg)                 \
-  V(BitwiseXor, AccumulatorUse::kReadWrite, OperandType::kReg)                \
-  V(BitwiseAnd, AccumulatorUse::kReadWrite, OperandType::kReg)                \
-  V(ShiftLeft, AccumulatorUse::kReadWrite, OperandType::kReg)                 \
-  V(ShiftRight, AccumulatorUse::kReadWrite, OperandType::kReg)                \
-  V(ShiftRightLogical, AccumulatorUse::kReadWrite, OperandType::kReg)         \
-                                                                              \
-  /* Unary Operators */                                                       \
-  V(Inc, AccumulatorUse::kReadWrite)                                          \
-  V(Dec, AccumulatorUse::kReadWrite)                                          \
-  V(ToBooleanLogicalNot, AccumulatorUse::kReadWrite)                          \
-  V(LogicalNot, AccumulatorUse::kReadWrite)                                   \
-  V(TypeOf, AccumulatorUse::kReadWrite)                                       \
-  V(DeletePropertyStrict, AccumulatorUse::kReadWrite, OperandType::kReg)      \
-  V(DeletePropertySloppy, AccumulatorUse::kReadWrite, OperandType::kReg)      \
-                                                                              \
-  /* Call operations */                                                       \
-  V(Call, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,       \
-    OperandType::kRegCount, OperandType::kIdx)                                \
-  V(TailCall, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,   \
-    OperandType::kRegCount, OperandType::kIdx)                                \
-  V(CallRuntime, AccumulatorUse::kWrite, OperandType::kRuntimeId,             \
-    OperandType::kMaybeReg, OperandType::kRegCount)                           \
-  V(CallRuntimeForPair, AccumulatorUse::kNone, OperandType::kRuntimeId,       \
-    OperandType::kMaybeReg, OperandType::kRegCount, OperandType::kRegOutPair) \
-  V(CallJSRuntime, AccumulatorUse::kWrite, OperandType::kIdx,                 \
-    OperandType::kReg, OperandType::kRegCount)                                \
-                                                                              \
-  /* Intrinsics */                                                            \
-  V(InvokeIntrinsic, AccumulatorUse::kWrite, OperandType::kRuntimeId,         \
-    OperandType::kMaybeReg, OperandType::kRegCount)                           \
-                                                                              \
-  /* New operator */                                                          \
-  V(New, AccumulatorUse::kReadWrite, OperandType::kReg,                       \
-    OperandType::kMaybeReg, OperandType::kRegCount)                           \
-                                                                              \
-  /* Test Operators */                                                        \
-  V(TestEqual, AccumulatorUse::kReadWrite, OperandType::kReg)                 \
-  V(TestNotEqual, AccumulatorUse::kReadWrite, OperandType::kReg)              \
-  V(TestEqualStrict, AccumulatorUse::kReadWrite, OperandType::kReg)           \
-  V(TestLessThan, AccumulatorUse::kReadWrite, OperandType::kReg)              \
-  V(TestGreaterThan, AccumulatorUse::kReadWrite, OperandType::kReg)           \
-  V(TestLessThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg)       \
-  V(TestGreaterThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg)    \
-  V(TestInstanceOf, AccumulatorUse::kReadWrite, OperandType::kReg)            \
-  V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg)                    \
-                                                                              \
-  /* Cast operators */                                                        \
-  V(ToName, AccumulatorUse::kReadWrite)                                       \
-  V(ToNumber, AccumulatorUse::kReadWrite)                                     \
-  V(ToObject, AccumulatorUse::kReadWrite)                                     \
-                                                                              \
-  /* Literals */                                                              \
-  V(CreateRegExpLiteral, AccumulatorUse::kWrite, OperandType::kIdx,           \
-    OperandType::kIdx, OperandType::kFlag8)                                   \
-  V(CreateArrayLiteral, AccumulatorUse::kWrite, OperandType::kIdx,            \
-    OperandType::kIdx, OperandType::kFlag8)                                   \
-  V(CreateObjectLiteral, AccumulatorUse::kWrite, OperandType::kIdx,           \
-    OperandType::kIdx, OperandType::kFlag8)                                   \
-                                                                              \
-  /* Closure allocation */                                                    \
-  V(CreateClosure, AccumulatorUse::kWrite, OperandType::kIdx,                 \
-    OperandType::kFlag8)                                                      \
-                                                                              \
-  /* Arguments allocation */                                                  \
-  V(CreateMappedArguments, AccumulatorUse::kWrite)                            \
-  V(CreateUnmappedArguments, AccumulatorUse::kWrite)                          \
-  V(CreateRestParameter, AccumulatorUse::kWrite)                              \
-                                                                              \
-  /* Control Flow */                                                          \
-  V(Jump, AccumulatorUse::kNone, OperandType::kImm)                           \
-  V(JumpConstant, AccumulatorUse::kNone, OperandType::kIdx)                   \
-  V(JumpIfTrue, AccumulatorUse::kRead, OperandType::kImm)                     \
-  V(JumpIfTrueConstant, AccumulatorUse::kRead, OperandType::kIdx)             \
-  V(JumpIfFalse, AccumulatorUse::kRead, OperandType::kImm)                    \
-  V(JumpIfFalseConstant, AccumulatorUse::kRead, OperandType::kIdx)            \
-  V(JumpIfToBooleanTrue, AccumulatorUse::kRead, OperandType::kImm)            \
-  V(JumpIfToBooleanTrueConstant, AccumulatorUse::kRead, OperandType::kIdx)    \
-  V(JumpIfToBooleanFalse, AccumulatorUse::kRead, OperandType::kImm)           \
-  V(JumpIfToBooleanFalseConstant, AccumulatorUse::kRead, OperandType::kIdx)   \
-  V(JumpIfNull, AccumulatorUse::kRead, OperandType::kImm)                     \
-  V(JumpIfNullConstant, AccumulatorUse::kRead, OperandType::kIdx)             \
-  V(JumpIfUndefined, AccumulatorUse::kRead, OperandType::kImm)                \
-  V(JumpIfUndefinedConstant, AccumulatorUse::kRead, OperandType::kIdx)        \
-  V(JumpIfNotHole, AccumulatorUse::kRead, OperandType::kImm)                  \
-  V(JumpIfNotHoleConstant, AccumulatorUse::kRead, OperandType::kIdx)          \
-                                                                              \
-  /* Complex flow control For..in */                                          \
-  V(ForInPrepare, AccumulatorUse::kRead, OperandType::kRegOutTriple)          \
-  V(ForInDone, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg)  \
-  V(ForInNext, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,  \
-    OperandType::kRegPair, OperandType::kIdx)                                 \
-  V(ForInStep, AccumulatorUse::kWrite, OperandType::kReg)                     \
-                                                                              \
-  /* Perform a stack guard check */                                           \
-  V(StackCheck, AccumulatorUse::kNone)                                        \
-                                                                              \
-  /* Non-local flow control */                                                \
-  V(Throw, AccumulatorUse::kRead)                                             \
-  V(ReThrow, AccumulatorUse::kRead)                                           \
-  V(Return, AccumulatorUse::kRead)                                            \
-                                                                              \
-  /* Generators */                                                            \
-  V(SuspendGenerator, AccumulatorUse::kRead, OperandType::kReg)               \
-  V(ResumeGenerator, AccumulatorUse::kWrite, OperandType::kReg)               \
-                                                                              \
-  /* Debugger */                                                              \
-  V(Debugger, AccumulatorUse::kNone)                                          \
-  DEBUG_BREAK_BYTECODE_LIST(V)                                                \
-                                                                              \
-  /* Illegal bytecode (terminates execution) */                               \
-  V(Illegal, AccumulatorUse::kNone)                                           \
-                                                                              \
-  /* No operation (used to maintain source positions for peephole */          \
-  /* eliminated bytecodes). */                                                \
+#define BYTECODE_LIST(V)                                                       \
+  /* Extended width operands */                                                \
+  V(Wide, AccumulatorUse::kNone)                                               \
+  V(ExtraWide, AccumulatorUse::kNone)                                          \
+                                                                               \
+  /* Loading the accumulator */                                                \
+  V(LdaZero, AccumulatorUse::kWrite)                                           \
+  V(LdaSmi, AccumulatorUse::kWrite, OperandType::kImm)                         \
+  V(LdaUndefined, AccumulatorUse::kWrite)                                      \
+  V(LdaNull, AccumulatorUse::kWrite)                                           \
+  V(LdaTheHole, AccumulatorUse::kWrite)                                        \
+  V(LdaTrue, AccumulatorUse::kWrite)                                           \
+  V(LdaFalse, AccumulatorUse::kWrite)                                          \
+  V(LdaConstant, AccumulatorUse::kWrite, OperandType::kIdx)                    \
+                                                                               \
+  /* Loading registers */                                                      \
+  V(LdrUndefined, AccumulatorUse::kNone, OperandType::kRegOut)                 \
+                                                                               \
+  /* Globals */                                                                \
+  V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx)                      \
+  V(LdrGlobal, AccumulatorUse::kNone, OperandType::kIdx, OperandType::kRegOut) \
+  V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx)          \
+  V(StaGlobalSloppy, AccumulatorUse::kRead, OperandType::kIdx,                 \
+    OperandType::kIdx)                                                         \
+  V(StaGlobalStrict, AccumulatorUse::kRead, OperandType::kIdx,                 \
+    OperandType::kIdx)                                                         \
+                                                                               \
+  /* Context operations */                                                     \
+  V(PushContext, AccumulatorUse::kRead, OperandType::kRegOut)                  \
+  V(PopContext, AccumulatorUse::kNone, OperandType::kReg)                      \
+  V(LdaContextSlot, AccumulatorUse::kWrite, OperandType::kReg,                 \
+    OperandType::kIdx)                                                         \
+  V(LdrContextSlot, AccumulatorUse::kNone, OperandType::kReg,                  \
+    OperandType::kIdx, OperandType::kRegOut)                                   \
+  V(StaContextSlot, AccumulatorUse::kRead, OperandType::kReg,                  \
+    OperandType::kIdx)                                                         \
+                                                                               \
+  /* Load-Store lookup slots */                                                \
+  V(LdaLookupSlot, AccumulatorUse::kWrite, OperandType::kIdx)                  \
+  V(LdaLookupSlotInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx)      \
+  V(StaLookupSlotSloppy, AccumulatorUse::kReadWrite, OperandType::kIdx)        \
+  V(StaLookupSlotStrict, AccumulatorUse::kReadWrite, OperandType::kIdx)        \
+                                                                               \
+  /* Register-accumulator transfers */                                         \
+  V(Ldar, AccumulatorUse::kWrite, OperandType::kReg)                           \
+  V(Star, AccumulatorUse::kRead, OperandType::kRegOut)                         \
+                                                                               \
+  /* Register-register transfers */                                            \
+  V(Mov, AccumulatorUse::kNone, OperandType::kReg, OperandType::kRegOut)       \
+                                                                               \
+  /* Property loads (LoadIC) operations */                                     \
+  V(LdaNamedProperty, AccumulatorUse::kWrite, OperandType::kReg,               \
+    OperandType::kIdx, OperandType::kIdx)                                      \
+  V(LdrNamedProperty, AccumulatorUse::kNone, OperandType::kReg,                \
+    OperandType::kIdx, OperandType::kIdx, OperandType::kRegOut)                \
+  V(LdaKeyedProperty, AccumulatorUse::kReadWrite, OperandType::kReg,           \
+    OperandType::kIdx)                                                         \
+  V(LdrKeyedProperty, AccumulatorUse::kRead, OperandType::kReg,                \
+    OperandType::kIdx, OperandType::kRegOut)                                   \
+                                                                               \
+  /* Propery stores (StoreIC) operations */                                    \
+  V(StaNamedPropertySloppy, AccumulatorUse::kRead, OperandType::kReg,          \
+    OperandType::kIdx, OperandType::kIdx)                                      \
+  V(StaNamedPropertyStrict, AccumulatorUse::kRead, OperandType::kReg,          \
+    OperandType::kIdx, OperandType::kIdx)                                      \
+  V(StaKeyedPropertySloppy, AccumulatorUse::kRead, OperandType::kReg,          \
+    OperandType::kReg, OperandType::kIdx)                                      \
+  V(StaKeyedPropertyStrict, AccumulatorUse::kRead, OperandType::kReg,          \
+    OperandType::kReg, OperandType::kIdx)                                      \
+                                                                               \
+  /* Binary Operators */                                                       \
+  V(Add, AccumulatorUse::kReadWrite, OperandType::kReg)                        \
+  V(Sub, AccumulatorUse::kReadWrite, OperandType::kReg)                        \
+  V(Mul, AccumulatorUse::kReadWrite, OperandType::kReg)                        \
+  V(Div, AccumulatorUse::kReadWrite, OperandType::kReg)                        \
+  V(Mod, AccumulatorUse::kReadWrite, OperandType::kReg)                        \
+  V(BitwiseOr, AccumulatorUse::kReadWrite, OperandType::kReg)                  \
+  V(BitwiseXor, AccumulatorUse::kReadWrite, OperandType::kReg)                 \
+  V(BitwiseAnd, AccumulatorUse::kReadWrite, OperandType::kReg)                 \
+  V(ShiftLeft, AccumulatorUse::kReadWrite, OperandType::kReg)                  \
+  V(ShiftRight, AccumulatorUse::kReadWrite, OperandType::kReg)                 \
+  V(ShiftRightLogical, AccumulatorUse::kReadWrite, OperandType::kReg)          \
+                                                                               \
+  /* Unary Operators */                                                        \
+  V(Inc, AccumulatorUse::kReadWrite)                                           \
+  V(Dec, AccumulatorUse::kReadWrite)                                           \
+  V(ToBooleanLogicalNot, AccumulatorUse::kReadWrite)                           \
+  V(LogicalNot, AccumulatorUse::kReadWrite)                                    \
+  V(TypeOf, AccumulatorUse::kReadWrite)                                        \
+  V(DeletePropertyStrict, AccumulatorUse::kReadWrite, OperandType::kReg)       \
+  V(DeletePropertySloppy, AccumulatorUse::kReadWrite, OperandType::kReg)       \
+                                                                               \
+  /* Call operations */                                                        \
+  V(Call, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,        \
+    OperandType::kRegCount, OperandType::kIdx)                                 \
+  V(TailCall, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,    \
+    OperandType::kRegCount, OperandType::kIdx)                                 \
+  V(CallRuntime, AccumulatorUse::kWrite, OperandType::kRuntimeId,              \
+    OperandType::kMaybeReg, OperandType::kRegCount)                            \
+  V(CallRuntimeForPair, AccumulatorUse::kNone, OperandType::kRuntimeId,        \
+    OperandType::kMaybeReg, OperandType::kRegCount, OperandType::kRegOutPair)  \
+  V(CallJSRuntime, AccumulatorUse::kWrite, OperandType::kIdx,                  \
+    OperandType::kReg, OperandType::kRegCount)                                 \
+                                                                               \
+  /* Intrinsics */                                                             \
+  V(InvokeIntrinsic, AccumulatorUse::kWrite, OperandType::kIntrinsicId,        \
+    OperandType::kMaybeReg, OperandType::kRegCount)                            \
+                                                                               \
+  /* New operator */                                                           \
+  V(New, AccumulatorUse::kReadWrite, OperandType::kReg,                        \
+    OperandType::kMaybeReg, OperandType::kRegCount)                            \
+                                                                               \
+  /* Test Operators */                                                         \
+  V(TestEqual, AccumulatorUse::kReadWrite, OperandType::kReg)                  \
+  V(TestNotEqual, AccumulatorUse::kReadWrite, OperandType::kReg)               \
+  V(TestEqualStrict, AccumulatorUse::kReadWrite, OperandType::kReg)            \
+  V(TestLessThan, AccumulatorUse::kReadWrite, OperandType::kReg)               \
+  V(TestGreaterThan, AccumulatorUse::kReadWrite, OperandType::kReg)            \
+  V(TestLessThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg)        \
+  V(TestGreaterThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg)     \
+  V(TestInstanceOf, AccumulatorUse::kReadWrite, OperandType::kReg)             \
+  V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg)                     \
+                                                                               \
+  /* Cast operators */                                                         \
+  V(ToName, AccumulatorUse::kReadWrite)                                        \
+  V(ToNumber, AccumulatorUse::kReadWrite)                                      \
+  V(ToObject, AccumulatorUse::kReadWrite)                                      \
+                                                                               \
+  /* Literals */                                                               \
+  V(CreateRegExpLiteral, AccumulatorUse::kWrite, OperandType::kIdx,            \
+    OperandType::kIdx, OperandType::kFlag8)                                    \
+  V(CreateArrayLiteral, AccumulatorUse::kWrite, OperandType::kIdx,             \
+    OperandType::kIdx, OperandType::kFlag8)                                    \
+  V(CreateObjectLiteral, AccumulatorUse::kWrite, OperandType::kIdx,            \
+    OperandType::kIdx, OperandType::kFlag8)                                    \
+                                                                               \
+  /* Closure allocation */                                                     \
+  V(CreateClosure, AccumulatorUse::kWrite, OperandType::kIdx,                  \
+    OperandType::kFlag8)                                                       \
+                                                                               \
+  /* Arguments allocation */                                                   \
+  V(CreateMappedArguments, AccumulatorUse::kWrite)                             \
+  V(CreateUnmappedArguments, AccumulatorUse::kWrite)                           \
+  V(CreateRestParameter, AccumulatorUse::kWrite)                               \
+                                                                               \
+  /* Control Flow */                                                           \
+  V(Jump, AccumulatorUse::kNone, OperandType::kImm)                            \
+  V(JumpConstant, AccumulatorUse::kNone, OperandType::kIdx)                    \
+  V(JumpIfTrue, AccumulatorUse::kRead, OperandType::kImm)                      \
+  V(JumpIfTrueConstant, AccumulatorUse::kRead, OperandType::kIdx)              \
+  V(JumpIfFalse, AccumulatorUse::kRead, OperandType::kImm)                     \
+  V(JumpIfFalseConstant, AccumulatorUse::kRead, OperandType::kIdx)             \
+  V(JumpIfToBooleanTrue, AccumulatorUse::kRead, OperandType::kImm)             \
+  V(JumpIfToBooleanTrueConstant, AccumulatorUse::kRead, OperandType::kIdx)     \
+  V(JumpIfToBooleanFalse, AccumulatorUse::kRead, OperandType::kImm)            \
+  V(JumpIfToBooleanFalseConstant, AccumulatorUse::kRead, OperandType::kIdx)    \
+  V(JumpIfNull, AccumulatorUse::kRead, OperandType::kImm)                      \
+  V(JumpIfNullConstant, AccumulatorUse::kRead, OperandType::kIdx)              \
+  V(JumpIfUndefined, AccumulatorUse::kRead, OperandType::kImm)                 \
+  V(JumpIfUndefinedConstant, AccumulatorUse::kRead, OperandType::kIdx)         \
+  V(JumpIfNotHole, AccumulatorUse::kRead, OperandType::kImm)                   \
+  V(JumpIfNotHoleConstant, AccumulatorUse::kRead, OperandType::kIdx)           \
+                                                                               \
+  /* Complex flow control For..in */                                           \
+  V(ForInPrepare, AccumulatorUse::kRead, OperandType::kRegOutTriple)           \
+  V(ForInDone, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg)   \
+  V(ForInNext, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,   \
+    OperandType::kRegPair, OperandType::kIdx)                                  \
+  V(ForInStep, AccumulatorUse::kWrite, OperandType::kReg)                      \
+                                                                               \
+  /* Perform a stack guard check */                                            \
+  V(StackCheck, AccumulatorUse::kNone)                                         \
+                                                                               \
+  /* Non-local flow control */                                                 \
+  V(Throw, AccumulatorUse::kRead)                                              \
+  V(ReThrow, AccumulatorUse::kRead)                                            \
+  V(Return, AccumulatorUse::kRead)                                             \
+                                                                               \
+  /* Generators */                                                             \
+  V(SuspendGenerator, AccumulatorUse::kRead, OperandType::kReg)                \
+  V(ResumeGenerator, AccumulatorUse::kWrite, OperandType::kReg)                \
+                                                                               \
+  /* Debugger */                                                               \
+  V(Debugger, AccumulatorUse::kNone)                                           \
+  DEBUG_BREAK_BYTECODE_LIST(V)                                                 \
+                                                                               \
+  /* Illegal bytecode (terminates execution) */                                \
+  V(Illegal, AccumulatorUse::kNone)                                            \
+                                                                               \
+  /* No operation (used to maintain source positions for peephole */           \
+  /* eliminated bytecodes). */                                                 \
   V(Nop, AccumulatorUse::kNone)
 
 enum class AccumulatorUse : uint8_t {
@@ -376,6 +386,11 @@
   static Register bytecode_offset();
   bool is_bytecode_offset() const;
 
+  // Returns a register that can be used to represent the accumulator
+  // within code in the interpreter, but should never be emitted in
+  // bytecode.
+  static Register virtual_accumulator();
+
   OperandSize SizeOfOperand() const;
 
   int32_t ToOperand() const { return kRegisterFileStartOffset - index_; }
@@ -442,7 +457,10 @@
   static const char* OperandSizeToString(OperandSize operand_size);
 
   // Returns byte value of bytecode.
-  static uint8_t ToByte(Bytecode bytecode);
+  static uint8_t ToByte(Bytecode bytecode) {
+    DCHECK_LE(bytecode, Bytecode::kLast);
+    return static_cast<uint8_t>(bytecode);
+  }
 
   // Returns bytecode for |value|.
   static Bytecode FromByte(uint8_t value);
@@ -476,10 +494,22 @@
   // Return true if |bytecode| writes the accumulator with a boolean value.
   static bool WritesBooleanToAccumulator(Bytecode bytecode);
 
-  // Return true if |bytecode| is an accumulator load bytecode,
+  // Return true if |bytecode| is an accumulator load without effects,
   // e.g. LdaConstant, LdaTrue, Ldar.
   static bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode);
 
+  // Return true if |bytecode| is a jump without effects,
+  // e.g.  any jump excluding those that include type coercion like
+  // JumpIfTrueToBoolean.
+  static bool IsJumpWithoutEffects(Bytecode bytecode);
+
+  // Return true if |bytecode| is a register load without effects,
+  // e.g. Mov, Star, LdrUndefined.
+  static bool IsRegisterLoadWithoutEffects(Bytecode bytecode);
+
+  // Returns true if |bytecode| has no effects.
+  static bool IsWithoutExternalSideEffects(Bytecode bytecode);
+
   // Returns the i-th operand of |bytecode|.
   static OperandType GetOperandType(Bytecode bytecode, int i);
 
@@ -487,10 +517,18 @@
   // OperandType::kNone.
   static const OperandType* GetOperandTypes(Bytecode bytecode);
 
+  // Returns a pointer to an array of operand type info terminated in
+  // OperandTypeInfo::kNone.
+  static const OperandTypeInfo* GetOperandTypeInfos(Bytecode bytecode);
+
   // Returns the size of the i-th operand of |bytecode|.
   static OperandSize GetOperandSize(Bytecode bytecode, int i,
                                     OperandScale operand_scale);
 
+  // Returns a pointer to an array of the operand sizes for |bytecode|.
+  static const OperandSize* GetOperandSizes(Bytecode bytecode,
+                                            OperandScale operand_scale);
+
   // Returns the offset of the i-th operand of |bytecode| relative to the start
   // of the bytecode.
   static int GetOperandOffset(Bytecode bytecode, int i,
@@ -617,17 +655,7 @@
   static OperandSize SizeForSignedOperand(int value);
 
   // Return the operand size required to hold an unsigned operand.
-  static OperandSize SizeForUnsignedOperand(int value);
-
-  // Return the operand size required to hold an unsigned operand.
-  static OperandSize SizeForUnsignedOperand(size_t value);
-
-  // Return the OperandScale required for bytecode emission of
-  // operand sizes.
-  static OperandScale OperandSizesToScale(
-      OperandSize size0, OperandSize size1 = OperandSize::kByte,
-      OperandSize size2 = OperandSize::kByte,
-      OperandSize size3 = OperandSize::kByte);
+  static OperandSize SizeForUnsignedOperand(uint32_t value);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Bytecodes);
diff --git a/src/interpreter/control-flow-builders.h b/src/interpreter/control-flow-builders.h
index 8778b26..b72d6d5 100644
--- a/src/interpreter/control-flow-builders.h
+++ b/src/interpreter/control-flow-builders.h
@@ -7,6 +7,7 @@
 
 #include "src/interpreter/bytecode-array-builder.h"
 
+#include "src/interpreter/bytecode-label.h"
 #include "src/zone-containers.h"
 
 namespace v8 {
diff --git a/src/interpreter/interpreter-assembler.cc b/src/interpreter/interpreter-assembler.cc
index 4e911eb..ee5f8be 100644
--- a/src/interpreter/interpreter-assembler.cc
+++ b/src/interpreter/interpreter-assembler.cc
@@ -31,6 +31,7 @@
                         Bytecodes::ReturnCount(bytecode)),
       bytecode_(bytecode),
       operand_scale_(operand_scale),
+      interpreted_frame_pointer_(this, MachineType::PointerRepresentation()),
       accumulator_(this, MachineRepresentation::kTagged),
       accumulator_use_(AccumulatorUse::kNone),
       made_call_(false),
@@ -50,6 +51,13 @@
   DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
 }
 
+Node* InterpreterAssembler::GetInterpretedFramePointer() {
+  if (!interpreted_frame_pointer_.IsBound()) {
+    interpreted_frame_pointer_.Bind(LoadParentFramePointer());
+  }
+  return interpreted_frame_pointer_.value();
+}
+
 Node* InterpreterAssembler::GetAccumulatorUnchecked() {
   return accumulator_.value();
 }
@@ -93,7 +101,8 @@
 }
 
 Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
-  return IntPtrAdd(LoadParentFramePointer(), RegisterFrameOffset(reg_index));
+  return IntPtrAdd(GetInterpretedFramePointer(),
+                   RegisterFrameOffset(reg_index));
 }
 
 Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
@@ -101,24 +110,24 @@
 }
 
 Node* InterpreterAssembler::LoadRegister(Register reg) {
-  return Load(MachineType::AnyTagged(), LoadParentFramePointer(),
+  return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
               IntPtrConstant(reg.ToOperand() << kPointerSizeLog2));
 }
 
 Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
-  return Load(MachineType::AnyTagged(), LoadParentFramePointer(),
+  return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
               RegisterFrameOffset(reg_index));
 }
 
 Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
   return StoreNoWriteBarrier(
-      MachineRepresentation::kTagged, LoadParentFramePointer(),
+      MachineRepresentation::kTagged, GetInterpretedFramePointer(),
       IntPtrConstant(reg.ToOperand() << kPointerSizeLog2), value);
 }
 
 Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
   return StoreNoWriteBarrier(MachineRepresentation::kTagged,
-                             LoadParentFramePointer(),
+                             GetInterpretedFramePointer(),
                              RegisterFrameOffset(reg_index), value);
 }
 
@@ -363,6 +372,15 @@
   return BytecodeUnsignedOperand(operand_index, operand_size);
 }
 
+Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
+  DCHECK(OperandType::kIntrinsicId ==
+         Bytecodes::GetOperandType(bytecode_, operand_index));
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  DCHECK_EQ(operand_size, OperandSize::kByte);
+  return BytecodeUnsignedOperand(operand_index, operand_size);
+}
+
 Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
   Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
                                         BytecodeArray::kConstantPoolOffset);
@@ -394,10 +412,9 @@
 
 Node* InterpreterAssembler::LoadTypeFeedbackVector() {
   Node* function = LoadRegister(Register::function_closure());
-  Node* shared_info =
-      LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
+  Node* literals = LoadObjectField(function, JSFunction::kLiteralsOffset);
   Node* vector =
-      LoadObjectField(shared_info, SharedFunctionInfo::kFeedbackVectorOffset);
+      LoadObjectField(literals, LiteralsArray::kFeedbackVectorOffset);
   return vector;
 }
 
diff --git a/src/interpreter/interpreter-assembler.h b/src/interpreter/interpreter-assembler.h
index f8d4b7c..183d4dd 100644
--- a/src/interpreter/interpreter-assembler.h
+++ b/src/interpreter/interpreter-assembler.h
@@ -41,6 +41,9 @@
   // Returns the runtime id immediate for bytecode operand
   // |operand_index| in the current bytecode.
   compiler::Node* BytecodeOperandRuntimeId(int operand_index);
+  // Returns the intrinsic id immediate for bytecode operand
+  // |operand_index| in the current bytecode.
+  compiler::Node* BytecodeOperandIntrinsicId(int operand_index);
 
   // Accumulator.
   compiler::Node* GetAccumulator();
@@ -146,6 +149,9 @@
   void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
                            BailoutReason bailout_reason);
 
+  // Returns the offset from the BytecodeArrayPointer of the current bytecode.
+  compiler::Node* BytecodeOffset();
+
  protected:
   Bytecode bytecode() const { return bytecode_; }
   static bool TargetSupportsUnalignedAccess();
@@ -153,8 +159,7 @@
  private:
   // Returns a tagged pointer to the current function's BytecodeArray object.
   compiler::Node* BytecodeArrayTaggedPointer();
-  // Returns the offset from the BytecodeArrayPointer of the current bytecode.
-  compiler::Node* BytecodeOffset();
+
   // Returns a raw pointer to first entry in the interpreter dispatch table.
   compiler::Node* DispatchTableRawPointer();
 
@@ -163,6 +168,10 @@
   // tracing as these need to bypass accumulator use validity checks.
   compiler::Node* GetAccumulatorUnchecked();
 
+  // Returns the frame pointer for the interpreted frame of the function being
+  // interpreted.
+  compiler::Node* GetInterpretedFramePointer();
+
   // Saves and restores interpreter bytecode offset to the interpreter stack
   // frame when performing a call.
   void CallPrologue() override;
@@ -229,6 +238,7 @@
 
   Bytecode bytecode_;
   OperandScale operand_scale_;
+  CodeStubAssembler::Variable interpreted_frame_pointer_;
   CodeStubAssembler::Variable accumulator_;
   AccumulatorUse accumulator_use_;
   bool made_call_;
diff --git a/src/interpreter/interpreter-intrinsics.cc b/src/interpreter/interpreter-intrinsics.cc
index 6d9917d..109bf8e 100644
--- a/src/interpreter/interpreter-intrinsics.cc
+++ b/src/interpreter/interpreter-intrinsics.cc
@@ -4,6 +4,8 @@
 
 #include "src/interpreter/interpreter-intrinsics.h"
 
+#include "src/code-factory.h"
+
 namespace v8 {
 namespace internal {
 namespace interpreter {
@@ -13,8 +15,11 @@
 #define __ assembler_->
 
 IntrinsicsHelper::IntrinsicsHelper(InterpreterAssembler* assembler)
-    : assembler_(assembler) {}
+    : isolate_(assembler->isolate()),
+      zone_(assembler->zone()),
+      assembler_(assembler) {}
 
+// static
 bool IntrinsicsHelper::IsSupported(Runtime::FunctionId function_id) {
   switch (function_id) {
 #define SUPPORTED(name, lower_case, count) case Runtime::kInline##name:
@@ -26,6 +31,36 @@
   }
 }
 
+// static
+IntrinsicsHelper::IntrinsicId IntrinsicsHelper::FromRuntimeId(
+    Runtime::FunctionId function_id) {
+  switch (function_id) {
+#define TO_RUNTIME_ID(name, lower_case, count) \
+  case Runtime::kInline##name:                 \
+    return IntrinsicId::k##name;
+    INTRINSICS_LIST(TO_RUNTIME_ID)
+#undef TO_RUNTIME_ID
+    default:
+      UNREACHABLE();
+      return static_cast<IntrinsicsHelper::IntrinsicId>(-1);
+  }
+}
+
+// static
+Runtime::FunctionId IntrinsicsHelper::ToRuntimeId(
+    IntrinsicsHelper::IntrinsicId intrinsic_id) {
+  switch (intrinsic_id) {
+#define TO_INTRINSIC_ID(name, lower_case, count) \
+  case IntrinsicId::k##name:                     \
+    return Runtime::kInline##name;
+    INTRINSICS_LIST(TO_INTRINSIC_ID)
+#undef TO_INTRINSIC_ID
+    default:
+      UNREACHABLE();
+      return static_cast<Runtime::FunctionId>(-1);
+  }
+}
+
 Node* IntrinsicsHelper::InvokeIntrinsic(Node* function_id, Node* context,
                                         Node* first_arg_reg, Node* arg_count) {
   InterpreterAssembler::Label abort(assembler_), end(assembler_);
@@ -42,25 +77,27 @@
 #undef LABEL_POINTER
 
 #define CASE(name, lower_case, count) \
-  static_cast<int32_t>(Runtime::kInline##name),
+  static_cast<int32_t>(IntrinsicId::k##name),
   int32_t cases[] = {INTRINSICS_LIST(CASE)};
 #undef CASE
 
   __ Switch(function_id, &abort, cases, labels, arraysize(cases));
 #define HANDLE_CASE(name, lower_case, expected_arg_count)   \
   __ Bind(&lower_case);                                     \
-  if (FLAG_debug_code) {                                    \
+  if (FLAG_debug_code && expected_arg_count >= 0) {         \
     AbortIfArgCountMismatch(expected_arg_count, arg_count); \
   }                                                         \
-  result.Bind(name(first_arg_reg));                         \
+  result.Bind(name(first_arg_reg, arg_count, context));     \
   __ Goto(&end);
   INTRINSICS_LIST(HANDLE_CASE)
 #undef HANDLE_CASE
 
   __ Bind(&abort);
-  __ Abort(BailoutReason::kUnexpectedFunctionIDForInvokeIntrinsic);
-  result.Bind(__ UndefinedConstant());
-  __ Goto(&end);
+  {
+    __ Abort(BailoutReason::kUnexpectedFunctionIDForInvokeIntrinsic);
+    result.Bind(__ UndefinedConstant());
+    __ Goto(&end);
+  }
 
   __ Bind(&end);
   return result.value();
@@ -74,84 +111,246 @@
 
   InterpreterAssembler::Label if_true(assembler_), if_false(assembler_),
       end(assembler_);
-  Node* condition;
   if (mode == kInstanceTypeEqual) {
-    condition = __ Word32Equal(instance_type, __ Int32Constant(type));
+    return __ Word32Equal(instance_type, __ Int32Constant(type));
   } else {
     DCHECK(mode == kInstanceTypeGreaterThanOrEqual);
-    condition =
-        __ Int32GreaterThanOrEqual(instance_type, __ Int32Constant(type));
+    return __ Int32GreaterThanOrEqual(instance_type, __ Int32Constant(type));
   }
-  __ Branch(condition, &if_true, &if_false);
+}
 
-  __ Bind(&if_true);
-  return_value.Bind(__ BooleanConstant(true));
-  __ Goto(&end);
+Node* IntrinsicsHelper::IsInstanceType(Node* input, int type) {
+  InterpreterAssembler::Variable return_value(assembler_,
+                                              MachineRepresentation::kTagged);
+  InterpreterAssembler::Label if_not_smi(assembler_), return_true(assembler_),
+      return_false(assembler_), end(assembler_);
+  Node* arg = __ LoadRegister(input);
+  __ GotoIf(__ WordIsSmi(arg), &return_false);
 
-  __ Bind(&if_false);
-  return_value.Bind(__ BooleanConstant(false));
-  __ Goto(&end);
+  Node* condition = CompareInstanceType(arg, type, kInstanceTypeEqual);
+  __ Branch(condition, &return_true, &return_false);
+
+  __ Bind(&return_true);
+  {
+    return_value.Bind(__ BooleanConstant(true));
+    __ Goto(&end);
+  }
+
+  __ Bind(&return_false);
+  {
+    return_value.Bind(__ BooleanConstant(false));
+    __ Goto(&end);
+  }
 
   __ Bind(&end);
   return return_value.value();
 }
 
-Node* IntrinsicsHelper::IsJSReceiver(Node* input) {
+Node* IntrinsicsHelper::IsJSReceiver(Node* input, Node* arg_count,
+                                     Node* context) {
   InterpreterAssembler::Variable return_value(assembler_,
                                               MachineRepresentation::kTagged);
-
-  InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_),
+  InterpreterAssembler::Label return_true(assembler_), return_false(assembler_),
       end(assembler_);
+
   Node* arg = __ LoadRegister(input);
+  __ GotoIf(__ WordIsSmi(arg), &return_false);
 
-  __ Branch(__ WordIsSmi(arg), &if_smi, &if_not_smi);
-  __ Bind(&if_smi);
-  return_value.Bind(__ BooleanConstant(false));
-  __ Goto(&end);
-
-  __ Bind(&if_not_smi);
   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-  return_value.Bind(CompareInstanceType(arg, FIRST_JS_RECEIVER_TYPE,
-                                        kInstanceTypeGreaterThanOrEqual));
-  __ Goto(&end);
+  Node* condition = CompareInstanceType(arg, FIRST_JS_RECEIVER_TYPE,
+                                        kInstanceTypeGreaterThanOrEqual);
+  __ Branch(condition, &return_true, &return_false);
+
+  __ Bind(&return_true);
+  {
+    return_value.Bind(__ BooleanConstant(true));
+    __ Goto(&end);
+  }
+
+  __ Bind(&return_false);
+  {
+    return_value.Bind(__ BooleanConstant(false));
+    __ Goto(&end);
+  }
 
   __ Bind(&end);
   return return_value.value();
 }
 
-Node* IntrinsicsHelper::IsArray(Node* input) {
+Node* IntrinsicsHelper::IsArray(Node* input, Node* arg_count, Node* context) {
+  return IsInstanceType(input, JS_ARRAY_TYPE);
+}
+
+Node* IntrinsicsHelper::IsJSProxy(Node* input, Node* arg_count, Node* context) {
+  return IsInstanceType(input, JS_PROXY_TYPE);
+}
+
+Node* IntrinsicsHelper::IsRegExp(Node* input, Node* arg_count, Node* context) {
+  return IsInstanceType(input, JS_REGEXP_TYPE);
+}
+
+Node* IntrinsicsHelper::IsTypedArray(Node* input, Node* arg_count,
+                                     Node* context) {
+  return IsInstanceType(input, JS_TYPED_ARRAY_TYPE);
+}
+
+Node* IntrinsicsHelper::IsSmi(Node* input, Node* arg_count, Node* context) {
   InterpreterAssembler::Variable return_value(assembler_,
                                               MachineRepresentation::kTagged);
-
   InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_),
       end(assembler_);
+
   Node* arg = __ LoadRegister(input);
 
   __ Branch(__ WordIsSmi(arg), &if_smi, &if_not_smi);
   __ Bind(&if_smi);
-  return_value.Bind(__ BooleanConstant(false));
-  __ Goto(&end);
+  {
+    return_value.Bind(__ BooleanConstant(true));
+    __ Goto(&end);
+  }
 
   __ Bind(&if_not_smi);
-  return_value.Bind(
-      CompareInstanceType(arg, JS_ARRAY_TYPE, kInstanceTypeEqual));
-  __ Goto(&end);
+  {
+    return_value.Bind(__ BooleanConstant(false));
+    __ Goto(&end);
+  }
 
   __ Bind(&end);
   return return_value.value();
 }
 
+Node* IntrinsicsHelper::IntrinsicAsStubCall(Node* args_reg, Node* context,
+                                            Callable const& callable) {
+  int param_count = callable.descriptor().GetParameterCount();
+  Node** args = zone()->NewArray<Node*>(param_count + 1);  // 1 for context
+  for (int i = 0; i < param_count; i++) {
+    args[i] = __ LoadRegister(args_reg);
+    args_reg = __ NextRegister(args_reg);
+  }
+  args[param_count] = context;
+
+  return __ CallStubN(callable, args);
+}
+
+Node* IntrinsicsHelper::HasProperty(Node* input, Node* arg_count,
+                                    Node* context) {
+  return IntrinsicAsStubCall(input, context,
+                             CodeFactory::HasProperty(isolate()));
+}
+
+Node* IntrinsicsHelper::MathPow(Node* input, Node* arg_count, Node* context) {
+  return IntrinsicAsStubCall(input, context, CodeFactory::MathPow(isolate()));
+}
+
+Node* IntrinsicsHelper::NewObject(Node* input, Node* arg_count, Node* context) {
+  return IntrinsicAsStubCall(input, context,
+                             CodeFactory::FastNewObject(isolate()));
+}
+
+Node* IntrinsicsHelper::NumberToString(Node* input, Node* arg_count,
+                                       Node* context) {
+  return IntrinsicAsStubCall(input, context,
+                             CodeFactory::NumberToString(isolate()));
+}
+
+Node* IntrinsicsHelper::RegExpConstructResult(Node* input, Node* arg_count,
+                                              Node* context) {
+  return IntrinsicAsStubCall(input, context,
+                             CodeFactory::RegExpConstructResult(isolate()));
+}
+
+Node* IntrinsicsHelper::RegExpExec(Node* input, Node* arg_count,
+                                   Node* context) {
+  return IntrinsicAsStubCall(input, context,
+                             CodeFactory::RegExpExec(isolate()));
+}
+
+Node* IntrinsicsHelper::SubString(Node* input, Node* arg_count, Node* context) {
+  return IntrinsicAsStubCall(input, context, CodeFactory::SubString(isolate()));
+}
+
+Node* IntrinsicsHelper::ToString(Node* input, Node* arg_count, Node* context) {
+  return IntrinsicAsStubCall(input, context, CodeFactory::ToString(isolate()));
+}
+
+Node* IntrinsicsHelper::ToName(Node* input, Node* arg_count, Node* context) {
+  return IntrinsicAsStubCall(input, context, CodeFactory::ToName(isolate()));
+}
+
+Node* IntrinsicsHelper::ToLength(Node* input, Node* arg_count, Node* context) {
+  return IntrinsicAsStubCall(input, context, CodeFactory::ToLength(isolate()));
+}
+
+Node* IntrinsicsHelper::ToInteger(Node* input, Node* arg_count, Node* context) {
+  return IntrinsicAsStubCall(input, context, CodeFactory::ToInteger(isolate()));
+}
+
+Node* IntrinsicsHelper::ToNumber(Node* input, Node* arg_count, Node* context) {
+  return IntrinsicAsStubCall(input, context, CodeFactory::ToNumber(isolate()));
+}
+
+Node* IntrinsicsHelper::ToObject(Node* input, Node* arg_count, Node* context) {
+  return IntrinsicAsStubCall(input, context, CodeFactory::ToObject(isolate()));
+}
+
+Node* IntrinsicsHelper::Call(Node* args_reg, Node* arg_count, Node* context) {
+  // First argument register contains the function target.
+  Node* function = __ LoadRegister(args_reg);
+
+  // Receiver is the second runtime call argument.
+  Node* receiver_reg = __ NextRegister(args_reg);
+  Node* receiver_arg = __ RegisterLocation(receiver_reg);
+
+  // Subtract function and receiver from arg count.
+  Node* function_and_receiver_count = __ Int32Constant(2);
+  Node* target_args_count = __ Int32Sub(arg_count, function_and_receiver_count);
+
+  if (FLAG_debug_code) {
+    InterpreterAssembler::Label arg_count_positive(assembler_);
+    Node* comparison = __ Int32LessThan(target_args_count, __ Int32Constant(0));
+    __ GotoUnless(comparison, &arg_count_positive);
+    __ Abort(kWrongArgumentCountForInvokeIntrinsic);
+    __ Goto(&arg_count_positive);
+    __ Bind(&arg_count_positive);
+  }
+
+  Node* result = __ CallJS(function, context, receiver_arg, target_args_count,
+                           TailCallMode::kDisallow);
+  return result;
+}
+
+Node* IntrinsicsHelper::ValueOf(Node* args_reg, Node* arg_count,
+                                Node* context) {
+  InterpreterAssembler::Variable return_value(assembler_,
+                                              MachineRepresentation::kTagged);
+  InterpreterAssembler::Label done(assembler_);
+
+  Node* object = __ LoadRegister(args_reg);
+  return_value.Bind(object);
+
+  // If the object is a smi return the object.
+  __ GotoIf(__ WordIsSmi(object), &done);
+
+  // If the object is not a value type, return the object.
+  Node* condition =
+      CompareInstanceType(object, JS_VALUE_TYPE, kInstanceTypeEqual);
+  __ GotoUnless(condition, &done);
+
+  // If the object is a value type, return the value field.
+  return_value.Bind(__ LoadObjectField(object, JSValue::kValueOffset));
+  __ Goto(&done);
+
+  __ Bind(&done);
+  return return_value.value();
+}
+
 void IntrinsicsHelper::AbortIfArgCountMismatch(int expected, Node* actual) {
-  InterpreterAssembler::Label match(assembler_), mismatch(assembler_),
-      end(assembler_);
+  InterpreterAssembler::Label match(assembler_);
   Node* comparison = __ Word32Equal(actual, __ Int32Constant(expected));
-  __ Branch(comparison, &match, &mismatch);
-  __ Bind(&mismatch);
+  __ GotoIf(comparison, &match);
   __ Abort(kWrongArgumentCountForInvokeIntrinsic);
-  __ Goto(&end);
+  __ Goto(&match);
   __ Bind(&match);
-  __ Goto(&end);
-  __ Bind(&end);
 }
 
 }  // namespace interpreter
diff --git a/src/interpreter/interpreter-intrinsics.h b/src/interpreter/interpreter-intrinsics.h
index e27c678..b1c0cdc 100644
--- a/src/interpreter/interpreter-intrinsics.h
+++ b/src/interpreter/interpreter-intrinsics.h
@@ -20,14 +20,43 @@
 class Node;
 }  // namespace compiler
 
-#define INTRINSICS_LIST(V)           \
-  V(IsJSReceiver, is_js_receiver, 1) \
-  V(IsArray, is_array, 1)
-
 namespace interpreter {
 
+// List of supported intrinsics, with upper case name, lower case name and
+// expected number of arguments (-1 denoting argument count is variable).
+#define INTRINSICS_LIST(V)                              \
+  V(Call, call, -1)                                     \
+  V(HasProperty, has_property, 2)                       \
+  V(IsArray, is_array, 1)                               \
+  V(IsJSProxy, is_js_proxy, 1)                          \
+  V(IsJSReceiver, is_js_receiver, 1)                    \
+  V(IsRegExp, is_regexp, 1)                             \
+  V(IsSmi, is_smi, 1)                                   \
+  V(IsTypedArray, is_typed_array, 1)                    \
+  V(MathPow, math_pow, 2)                               \
+  V(NewObject, new_object, 2)                           \
+  V(NumberToString, number_to_string, 1)                \
+  V(RegExpConstructResult, reg_exp_construct_result, 3) \
+  V(RegExpExec, reg_exp_exec, 4)                        \
+  V(SubString, sub_string, 3)                           \
+  V(ToString, to_string, 1)                             \
+  V(ToName, to_name, 1)                                 \
+  V(ToLength, to_length, 1)                             \
+  V(ToInteger, to_integer, 1)                           \
+  V(ToNumber, to_number, 1)                             \
+  V(ToObject, to_object, 1)                             \
+  V(ValueOf, value_of, 1)
+
 class IntrinsicsHelper {
  public:
+  enum class IntrinsicId {
+#define DECLARE_INTRINSIC_ID(name, lower_case, count) k##name,
+    INTRINSICS_LIST(DECLARE_INTRINSIC_ID)
+#undef DECLARE_INTRINSIC_ID
+        kIdCount
+  };
+  STATIC_ASSERT(static_cast<uint32_t>(IntrinsicId::kIdCount) <= kMaxUInt8);
+
   explicit IntrinsicsHelper(InterpreterAssembler* assembler);
 
   compiler::Node* InvokeIntrinsic(compiler::Node* function_id,
@@ -36,22 +65,36 @@
                                   compiler::Node* arg_count);
 
   static bool IsSupported(Runtime::FunctionId function_id);
+  static IntrinsicId FromRuntimeId(Runtime::FunctionId function_id);
+  static Runtime::FunctionId ToRuntimeId(IntrinsicId intrinsic_id);
 
  private:
   enum InstanceTypeCompareMode {
     kInstanceTypeEqual,
     kInstanceTypeGreaterThanOrEqual
   };
+
+  compiler::Node* IsInstanceType(compiler::Node* input, int type);
   compiler::Node* CompareInstanceType(compiler::Node* map, int type,
                                       InstanceTypeCompareMode mode);
+  compiler::Node* IntrinsicAsStubCall(compiler::Node* input,
+                                      compiler::Node* context,
+                                      Callable const& callable);
   void AbortIfArgCountMismatch(int expected, compiler::Node* actual);
-  InterpreterAssembler* assembler_;
 
-#define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \
-  compiler::Node* name(compiler::Node* input);
+#define DECLARE_INTRINSIC_HELPER(name, lower_case, count)                \
+  compiler::Node* name(compiler::Node* input, compiler::Node* arg_count, \
+                       compiler::Node* context);
   INTRINSICS_LIST(DECLARE_INTRINSIC_HELPER)
 #undef DECLARE_INTRINSIC_HELPER
 
+  Isolate* isolate() { return isolate_; }
+  Zone* zone() { return zone_; }
+
+  Isolate* isolate_;
+  Zone* zone_;
+  InterpreterAssembler* assembler_;
+
   DISALLOW_COPY_AND_ASSIGN(IntrinsicsHelper);
 };
 
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc
index a42da50..8a05777 100644
--- a/src/interpreter/interpreter.cc
+++ b/src/interpreter/interpreter.cc
@@ -62,10 +62,11 @@
       size_t index = GetDispatchTableIndex(Bytecode::k##Name, operand_scale);  \
       dispatch_table_[index] = code->entry();                                  \
       TraceCodegen(code);                                                      \
-      LOG_CODE_EVENT(                                                          \
+      PROFILE(                                                                 \
           isolate_,                                                            \
           CodeCreateEvent(                                                     \
-              Logger::BYTECODE_HANDLER_TAG, AbstractCode::cast(*code),         \
+              CodeEventListener::BYTECODE_HANDLER_TAG,                         \
+              AbstractCode::cast(*code),                                       \
               Bytecodes::ToString(Bytecode::k##Name, operand_scale).c_str())); \
     }                                                                          \
   }
@@ -180,9 +181,8 @@
 bool Interpreter::IsDispatchTableInitialized() {
   if (FLAG_trace_ignition || FLAG_trace_ignition_codegen ||
       FLAG_trace_ignition_dispatches) {
-    // Regenerate table to add bytecode tracing operations,
-    // print the assembly code generated by TurboFan,
-    // or instrument handlers with dispatch counters.
+    // Regenerate table to add bytecode tracing operations, print the assembly
+    // code generated by TurboFan or instrument handlers with dispatch counters.
     return false;
   }
   return dispatch_table_[0] != nullptr;
@@ -250,7 +250,8 @@
                                     NewStringType::kNormal)
                 .ToLocalChecked();
         Local<v8::Number> counter_object = v8::Number::New(isolate, counter);
-        CHECK(counters_row->Set(context, to_name_object, counter_object)
+        CHECK(counters_row
+                  ->DefineOwnProperty(context, to_name_object, counter_object)
                   .IsJust());
       }
     }
@@ -261,7 +262,9 @@
                                 NewStringType::kNormal)
             .ToLocalChecked();
 
-    CHECK(counters_map->Set(context, from_name_object, counters_row).IsJust());
+    CHECK(
+        counters_map->DefineOwnProperty(context, from_name_object, counters_row)
+            .IsJust());
   }
 
   return counters_map;
@@ -286,19 +289,14 @@
   __ Dispatch();
 }
 
-void Interpreter::DoLoadConstant(InterpreterAssembler* assembler) {
-  Node* index = __ BytecodeOperandIdx(0);
-  Node* constant = __ LoadConstantPoolEntry(index);
-  __ SetAccumulator(constant);
-  __ Dispatch();
-}
-
-
 // LdaConstant <idx>
 //
 // Load constant literal at |idx| in the constant pool into the accumulator.
 void Interpreter::DoLdaConstant(InterpreterAssembler* assembler) {
-  DoLoadConstant(assembler);
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* constant = __ LoadConstantPoolEntry(index);
+  __ SetAccumulator(constant);
+  __ Dispatch();
 }
 
 // LdaUndefined
@@ -311,6 +309,16 @@
   __ Dispatch();
 }
 
+// LdrUndefined <reg>
+//
+// Loads undefined into register |reg| (the accumulator is not modified).
+void Interpreter::DoLdrUndefined(InterpreterAssembler* assembler) {
+  Node* undefined_value =
+      __ HeapConstant(isolate_->factory()->undefined_value());
+  Node* destination = __ BytecodeOperandReg(0);
+  __ StoreRegister(undefined_value, destination);
+  __ Dispatch();
+}
 
 // LdaNull
 //
@@ -321,7 +329,6 @@
   __ Dispatch();
 }
 
-
 // LdaTheHole
 //
 // Load TheHole into the accumulator.
@@ -331,7 +338,6 @@
   __ Dispatch();
 }
 
-
 // LdaTrue
 //
 // Load True into the accumulator.
@@ -341,7 +347,6 @@
   __ Dispatch();
 }
 
-
 // LdaFalse
 //
 // Load False into the accumulator.
@@ -351,7 +356,6 @@
   __ Dispatch();
 }
 
-
 // Ldar <src>
 //
 // Load accumulator with value from register <src>.
@@ -362,7 +366,6 @@
   __ Dispatch();
 }
 
-
 // Star <dst>
 //
 // Store accumulator to register <dst>.
@@ -373,7 +376,6 @@
   __ Dispatch();
 }
 
-
 // Mov <src> <dst>
 //
 // Stores the value of register <src> to register <dst>.
@@ -385,48 +387,58 @@
   __ Dispatch();
 }
 
-
-void Interpreter::DoLoadGlobal(Callable ic, InterpreterAssembler* assembler) {
+Node* Interpreter::BuildLoadGlobal(Callable ic,
+                                   InterpreterAssembler* assembler) {
   // Get the global object.
   Node* context = __ GetContext();
-  Node* native_context =
-      __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
-  Node* global = __ LoadContextSlot(native_context, Context::EXTENSION_INDEX);
 
-  // Load the global via the LoadIC.
+  // Load the global via the LoadGlobalIC.
   Node* code_target = __ HeapConstant(ic.code());
-  Node* constant_index = __ BytecodeOperandIdx(0);
-  Node* name = __ LoadConstantPoolEntry(constant_index);
-  Node* raw_slot = __ BytecodeOperandIdx(1);
+  Node* raw_slot = __ BytecodeOperandIdx(0);
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  Node* result = __ CallStub(ic.descriptor(), code_target, context, global,
-                             name, smi_slot, type_feedback_vector);
-  __ SetAccumulator(result);
-  __ Dispatch();
+  return __ CallStub(ic.descriptor(), code_target, context, smi_slot,
+                     type_feedback_vector);
 }
 
-// LdaGlobal <name_index> <slot>
+// LdaGlobal <slot>
 //
 // Load the global with name in constant pool entry <name_index> into the
 // accumulator using FeedBackVector slot <slot> outside of a typeof.
 void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
+  Callable ic =
+      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
+  Node* result = BuildLoadGlobal(ic, assembler);
+  __ SetAccumulator(result);
+  __ Dispatch();
 }
 
-// LdaGlobalInsideTypeof <name_index> <slot>
+// LdrGlobal <slot> <reg>
+//
+// Load the global whose name is identified by FeedBackVector slot <slot>
+// into register <reg>, outside of a typeof.
+void Interpreter::DoLdrGlobal(InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
+  Node* result = BuildLoadGlobal(ic, assembler);
+  Node* destination = __ BytecodeOperandReg(1);
+  __ StoreRegister(result, destination);
+  __ Dispatch();
+}
+
+// LdaGlobalInsideTypeof <slot>
 //
 // Load the global with name in constant pool entry <name_index> into the
 // accumulator using FeedBackVector slot <slot> inside of a typeof.
 void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
-                                                   UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
+  Callable ic =
+      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, INSIDE_TYPEOF);
+  Node* result = BuildLoadGlobal(ic, assembler);
+  __ SetAccumulator(result);
+  __ Dispatch();
 }
 
-void Interpreter::DoStoreGlobal(Callable ic, InterpreterAssembler* assembler) {
+void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
   // Get the global object.
   Node* context = __ GetContext();
   Node* native_context =
@@ -446,40 +458,51 @@
   __ Dispatch();
 }
 
-
 // StaGlobalSloppy <name_index> <slot>
 //
 // Store the value in the accumulator into the global with name in constant pool
 // entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
 void Interpreter::DoStaGlobalSloppy(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
-  DoStoreGlobal(ic, assembler);
+  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
+  DoStaGlobal(ic, assembler);
 }
 
-
 // StaGlobalStrict <name_index> <slot>
 //
 // Store the value in the accumulator into the global with name in constant pool
 // entry <name_index> using FeedBackVector slot <slot> in strict mode.
 void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
-  DoStoreGlobal(ic, assembler);
+  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
+  DoStaGlobal(ic, assembler);
+}
+
+compiler::Node* Interpreter::BuildLoadContextSlot(
+    InterpreterAssembler* assembler) {
+  Node* reg_index = __ BytecodeOperandReg(0);
+  Node* context = __ LoadRegister(reg_index);
+  Node* slot_index = __ BytecodeOperandIdx(1);
+  return __ LoadContextSlot(context, slot_index);
 }
 
 // LdaContextSlot <context> <slot_index>
 //
 // Load the object in |slot_index| of |context| into the accumulator.
 void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
-  Node* reg_index = __ BytecodeOperandReg(0);
-  Node* context = __ LoadRegister(reg_index);
-  Node* slot_index = __ BytecodeOperandIdx(1);
-  Node* result = __ LoadContextSlot(context, slot_index);
+  Node* result = BuildLoadContextSlot(assembler);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
+// LdrContextSlot <context> <slot_index> <reg>
+//
+// Load the object in <slot_index> of <context> into register <reg>.
+void Interpreter::DoLdrContextSlot(InterpreterAssembler* assembler) {
+  Node* result = BuildLoadContextSlot(assembler);
+  Node* destination = __ BytecodeOperandReg(2);
+  __ StoreRegister(result, destination);
+  __ Dispatch();
+}
+
 // StaContextSlot <context> <slot_index>
 //
 // Stores the object in the accumulator into |slot_index| of |context|.
@@ -492,8 +515,8 @@
   __ Dispatch();
 }
 
-void Interpreter::DoLoadLookupSlot(Runtime::FunctionId function_id,
-                                   InterpreterAssembler* assembler) {
+void Interpreter::DoLdaLookupSlot(Runtime::FunctionId function_id,
+                                  InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* name = __ LoadConstantPoolEntry(index);
   Node* context = __ GetContext();
@@ -507,7 +530,7 @@
 // Lookup the object with the name in constant pool entry |name_index|
 // dynamically.
 void Interpreter::DoLdaLookupSlot(InterpreterAssembler* assembler) {
-  DoLoadLookupSlot(Runtime::kLoadLookupSlot, assembler);
+  DoLdaLookupSlot(Runtime::kLoadLookupSlot, assembler);
 }
 
 // LdaLookupSlotInsideTypeof <name_index>
@@ -515,11 +538,11 @@
 // Lookup the object with the name in constant pool entry |name_index|
 // dynamically without causing a NoReferenceError.
 void Interpreter::DoLdaLookupSlotInsideTypeof(InterpreterAssembler* assembler) {
-  DoLoadLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
+  DoLdaLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
 }
 
-void Interpreter::DoStoreLookupSlot(LanguageMode language_mode,
-                                    InterpreterAssembler* assembler) {
+void Interpreter::DoStaLookupSlot(LanguageMode language_mode,
+                                  InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
   Node* index = __ BytecodeOperandIdx(0);
   Node* name = __ LoadConstantPoolEntry(index);
@@ -537,19 +560,19 @@
 // Store the object in accumulator to the object with the name in constant
 // pool entry |name_index| in sloppy mode.
 void Interpreter::DoStaLookupSlotSloppy(InterpreterAssembler* assembler) {
-  DoStoreLookupSlot(LanguageMode::SLOPPY, assembler);
+  DoStaLookupSlot(LanguageMode::SLOPPY, assembler);
 }
 
-
 // StaLookupSlotStrict <name_index>
 //
 // Store the object in accumulator to the object with the name in constant
 // pool entry |name_index| in strict mode.
 void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
-  DoStoreLookupSlot(LanguageMode::STRICT, assembler);
+  DoStaLookupSlot(LanguageMode::STRICT, assembler);
 }
 
-void Interpreter::DoLoadIC(Callable ic, InterpreterAssembler* assembler) {
+Node* Interpreter::BuildLoadNamedProperty(Callable ic,
+                                          InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* register_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(register_index);
@@ -559,23 +582,35 @@
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
   Node* context = __ GetContext();
-  Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
-                             name, smi_slot, type_feedback_vector);
+  return __ CallStub(ic.descriptor(), code_target, context, object, name,
+                     smi_slot, type_feedback_vector);
+}
+
+// LdaNamedProperty <object> <name_index> <slot>
+//
+// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
+// constant pool entry <name_index>.
+void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
+  Node* result = BuildLoadNamedProperty(ic, assembler);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
-// LoadIC <object> <name_index> <slot>
+// LdrNamedProperty <object> <name_index> <slot> <reg>
 //
 // Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
-// constant pool entry <name_index>.
-void Interpreter::DoLoadIC(InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   UNINITIALIZED);
-  DoLoadIC(ic, assembler);
+// constant pool entry <name_index> and puts the result into register <reg>.
+void Interpreter::DoLdrNamedProperty(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
+  Node* result = BuildLoadNamedProperty(ic, assembler);
+  Node* destination = __ BytecodeOperandReg(3);
+  __ StoreRegister(result, destination);
+  __ Dispatch();
 }
 
-void Interpreter::DoKeyedLoadIC(Callable ic, InterpreterAssembler* assembler) {
+Node* Interpreter::BuildLoadKeyedProperty(Callable ic,
+                                          InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(reg_index);
@@ -584,20 +619,31 @@
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
   Node* context = __ GetContext();
-  Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
-                             name, smi_slot, type_feedback_vector);
-  __ SetAccumulator(result);
-  __ Dispatch();
+  return __ CallStub(ic.descriptor(), code_target, context, object, name,
+                     smi_slot, type_feedback_vector);
 }
 
 // KeyedLoadIC <object> <slot>
 //
 // Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
 // in the accumulator.
-void Interpreter::DoKeyedLoadIC(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, UNINITIALIZED);
-  DoKeyedLoadIC(ic, assembler);
+void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
+  Node* result = BuildLoadKeyedProperty(ic, assembler);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+// LdrKeyedProperty <object> <slot> <reg>
+//
+// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
+// in the accumulator and puts the result in register <reg>.
+void Interpreter::DoLdrKeyedProperty(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
+  Node* result = BuildLoadKeyedProperty(ic, assembler);
+  Node* destination = __ BytecodeOperandReg(2);
+  __ StoreRegister(result, destination);
+  __ Dispatch();
 }
 
 void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
@@ -616,27 +662,23 @@
   __ Dispatch();
 }
 
-
-// StoreICSloppy <object> <name_index> <slot>
+// StaNamedPropertySloppy <object> <name_index> <slot>
 //
 // Calls the sloppy mode StoreIC at FeedBackVector slot <slot> for <object> and
 // the name in constant pool entry <name_index> with the value in the
 // accumulator.
-void Interpreter::DoStoreICSloppy(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+void Interpreter::DoStaNamedPropertySloppy(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
   DoStoreIC(ic, assembler);
 }
 
-
-// StoreICStrict <object> <name_index> <slot>
+// StaNamedPropertyStrict <object> <name_index> <slot>
 //
 // Calls the strict mode StoreIC at FeedBackVector slot <slot> for <object> and
 // the name in constant pool entry <name_index> with the value in the
 // accumulator.
-void Interpreter::DoStoreICStrict(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+void Interpreter::DoStaNamedPropertyStrict(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
   DoStoreIC(ic, assembler);
 }
 
@@ -656,25 +698,21 @@
   __ Dispatch();
 }
 
-
-// KeyedStoreICSloppy <object> <key> <slot>
+// StaKeyedPropertySloppy <object> <key> <slot>
 //
 // Calls the sloppy mode KeyStoreIC at FeedBackVector slot <slot> for <object>
 // and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICSloppy(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+void Interpreter::DoStaKeyedPropertySloppy(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY);
   DoKeyedStoreIC(ic, assembler);
 }
 
-
-// KeyedStoreICStore <object> <key> <slot>
+// StaKeyedPropertyStrict <object> <key> <slot>
 //
 // Calls the strict mode KeyStoreIC at FeedBackVector slot <slot> for <object>
 // and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICStrict(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+void Interpreter::DoStaKeyedPropertyStrict(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT);
   DoKeyedStoreIC(ic, assembler);
 }
 
@@ -691,7 +729,6 @@
   __ Dispatch();
 }
 
-
 // PopContext <context>
 //
 // Pops the current context and sets <context> as the new context.
@@ -702,33 +739,6 @@
   __ Dispatch();
 }
 
-void Interpreter::DoBinaryOp(Callable callable,
-                             InterpreterAssembler* assembler) {
-  // TODO(bmeurer): Collect definition side type feedback for various
-  // binary operations.
-  Node* target = __ HeapConstant(callable.code());
-  Node* reg_index = __ BytecodeOperandReg(0);
-  Node* lhs = __ LoadRegister(reg_index);
-  Node* rhs = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* result = __ CallStub(callable.descriptor(), target, context, lhs, rhs);
-  __ SetAccumulator(result);
-  __ Dispatch();
-}
-
-void Interpreter::DoBinaryOp(Runtime::FunctionId function_id,
-                             InterpreterAssembler* assembler) {
-  // TODO(rmcilroy): Call ICs which back-patch bytecode with type specialized
-  // operations, instead of calling builtins directly.
-  Node* reg_index = __ BytecodeOperandReg(0);
-  Node* lhs = __ LoadRegister(reg_index);
-  Node* rhs = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* result = __ CallRuntime(function_id, context, lhs, rhs);
-  __ SetAccumulator(result);
-  __ Dispatch();
-}
-
 template <class Generator>
 void Interpreter::DoBinaryOp(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
@@ -747,7 +757,6 @@
   DoBinaryOp<AddStub>(assembler);
 }
 
-
 // Sub <src>
 //
 // Subtract register <src> from accumulator.
@@ -755,7 +764,6 @@
   DoBinaryOp<SubtractStub>(assembler);
 }
 
-
 // Mul <src>
 //
 // Multiply accumulator by register <src>.
@@ -763,7 +771,6 @@
   DoBinaryOp<MultiplyStub>(assembler);
 }
 
-
 // Div <src>
 //
 // Divide register <src> by accumulator.
@@ -771,7 +778,6 @@
   DoBinaryOp<DivideStub>(assembler);
 }
 
-
 // Mod <src>
 //
 // Modulo register <src> by accumulator.
@@ -779,7 +785,6 @@
   DoBinaryOp<ModulusStub>(assembler);
 }
 
-
 // BitwiseOr <src>
 //
 // BitwiseOr register <src> to accumulator.
@@ -787,7 +792,6 @@
   DoBinaryOp<BitwiseOrStub>(assembler);
 }
 
-
 // BitwiseXor <src>
 //
 // BitwiseXor register <src> to accumulator.
@@ -795,7 +799,6 @@
   DoBinaryOp<BitwiseXorStub>(assembler);
 }
 
-
 // BitwiseAnd <src>
 //
 // BitwiseAnd register <src> to accumulator.
@@ -803,7 +806,6 @@
   DoBinaryOp<BitwiseAndStub>(assembler);
 }
 
-
 // ShiftLeft <src>
 //
 // Left shifts register <src> by the count specified in the accumulator.
@@ -814,7 +816,6 @@
   DoBinaryOp<ShiftLeftStub>(assembler);
 }
 
-
 // ShiftRight <src>
 //
 // Right shifts register <src> by the count specified in the accumulator.
@@ -825,7 +826,6 @@
   DoBinaryOp<ShiftRightStub>(assembler);
 }
 
-
 // ShiftRightLogical <src>
 //
 // Right Shifts register <src> by the count specified in the accumulator.
@@ -836,6 +836,17 @@
   DoBinaryOp<ShiftRightLogicalStub>(assembler);
 }
 
+void Interpreter::DoUnaryOp(Callable callable,
+                            InterpreterAssembler* assembler) {
+  Node* target = __ HeapConstant(callable.code());
+  Node* accumulator = __ GetAccumulator();
+  Node* context = __ GetContext();
+  Node* result =
+      __ CallStub(callable.descriptor(), target, context, accumulator);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
 template <class Generator>
 void Interpreter::DoUnaryOp(InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
@@ -845,6 +856,27 @@
   __ Dispatch();
 }
 
+// ToName
+//
+// Cast the object referenced by the accumulator to a name.
+void Interpreter::DoToName(InterpreterAssembler* assembler) {
+  DoUnaryOp(CodeFactory::ToName(isolate_), assembler);
+}
+
+// ToNumber
+//
+// Cast the object referenced by the accumulator to a number.
+void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
+  DoUnaryOp(CodeFactory::ToNumber(isolate_), assembler);
+}
+
+// ToObject
+//
+// Cast the object referenced by the accumulator to a JSObject.
+void Interpreter::DoToObject(InterpreterAssembler* assembler) {
+  DoUnaryOp(CodeFactory::ToObject(isolate_), assembler);
+}
+
 // Inc
 //
 // Increments value in the accumulator by one.
@@ -859,14 +891,22 @@
   DoUnaryOp<DecStub>(assembler);
 }
 
-void Interpreter::DoLogicalNotOp(Node* value, InterpreterAssembler* assembler) {
+Node* Interpreter::BuildToBoolean(Node* value,
+                                  InterpreterAssembler* assembler) {
+  Node* context = __ GetContext();
+  return ToBooleanStub::Generate(assembler, value, context);
+}
+
+Node* Interpreter::BuildLogicalNot(Node* value,
+                                   InterpreterAssembler* assembler) {
+  Variable result(assembler, MachineRepresentation::kTagged);
   Label if_true(assembler), if_false(assembler), end(assembler);
   Node* true_value = __ BooleanConstant(true);
   Node* false_value = __ BooleanConstant(false);
   __ BranchIfWordEqual(value, true_value, &if_true, &if_false);
   __ Bind(&if_true);
   {
-    __ SetAccumulator(false_value);
+    result.Bind(false_value);
     __ Goto(&end);
   }
   __ Bind(&if_false);
@@ -875,24 +915,23 @@
       __ AbortIfWordNotEqual(value, false_value,
                              BailoutReason::kExpectedBooleanValue);
     }
-    __ SetAccumulator(true_value);
+    result.Bind(true_value);
     __ Goto(&end);
   }
   __ Bind(&end);
+  return result.value();
 }
 
-// ToBooleanLogicalNot
+// LogicalNot
 //
 // Perform logical-not on the accumulator, first casting the
 // accumulator to a boolean value if required.
+// ToBooleanLogicalNot
 void Interpreter::DoToBooleanLogicalNot(InterpreterAssembler* assembler) {
-  Callable callable = CodeFactory::ToBoolean(isolate_);
-  Node* target = __ HeapConstant(callable.code());
-  Node* accumulator = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* to_boolean_value =
-      __ CallStub(callable.descriptor(), target, context, accumulator);
-  DoLogicalNotOp(to_boolean_value, assembler);
+  Node* value = __ GetAccumulator();
+  Node* to_boolean_value = BuildToBoolean(value, assembler);
+  Node* result = BuildLogicalNot(to_boolean_value, assembler);
+  __ SetAccumulator(result);
   __ Dispatch();
 }
 
@@ -902,7 +941,8 @@
 // value.
 void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
-  DoLogicalNotOp(value, assembler);
+  Node* result = BuildLogicalNot(value, assembler);
+  __ SetAccumulator(result);
   __ Dispatch();
 }
 
@@ -911,14 +951,7 @@
 // Load the accumulator with the string representating type of the
 // object in the accumulator.
 void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
-  Callable callable = CodeFactory::Typeof(isolate_);
-  Node* target = __ HeapConstant(callable.code());
-  Node* accumulator = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* result =
-      __ CallStub(callable.descriptor(), target, context, accumulator);
-  __ SetAccumulator(result);
-  __ Dispatch();
+  DoUnaryOp(CodeFactory::Typeof(isolate_), assembler);
 }
 
 void Interpreter::DoDelete(Runtime::FunctionId function_id,
@@ -932,7 +965,6 @@
   __ Dispatch();
 }
 
-
 // DeletePropertyStrict
 //
 // Delete the property specified in the accumulator from the object
@@ -941,7 +973,6 @@
   DoDelete(Runtime::kDeleteProperty_Strict, assembler);
 }
 
-
 // DeletePropertySloppy
 //
 // Delete the property specified in the accumulator from the object
@@ -967,7 +998,6 @@
   __ Dispatch();
 }
 
-
 // Call <callable> <receiver> <arg_count>
 //
 // Call a JSfunction or Callable in |callable| with the |receiver| and
@@ -995,7 +1025,6 @@
   __ Dispatch();
 }
 
-
 // CallRuntime <function_id> <first_arg> <arg_count>
 //
 // Call the runtime function |function_id| with the first argument in
@@ -1011,7 +1040,7 @@
 // |function_id| with the first argument in |first_arg| and |arg_count|
 // arguments in subsequent registers.
 void Interpreter::DoInvokeIntrinsic(InterpreterAssembler* assembler) {
-  Node* function_id = __ BytecodeOperandRuntimeId(0);
+  Node* function_id = __ BytecodeOperandIntrinsicId(0);
   Node* first_arg_reg = __ BytecodeOperandReg(1);
   Node* arg_count = __ BytecodeOperandCount(2);
   Node* context = __ GetContext();
@@ -1042,7 +1071,6 @@
   __ Dispatch();
 }
 
-
 // CallRuntimeForPair <function_id> <first_arg> <arg_count> <first_return>
 //
 // Call the runtime function |function_id| which returns a pair, with the
@@ -1074,7 +1102,6 @@
   __ Dispatch();
 }
 
-
 // CallJSRuntime <context_index> <receiver> <arg_count>
 //
 // Call the JS runtime function that has the |context_index| with the receiver
@@ -1098,7 +1125,6 @@
   __ Dispatch();
 }
 
-
 // New <constructor> <first_arg> <arg_count>
 //
 // Call operator new with |constructor| and the first argument in
@@ -1113,109 +1139,67 @@
 //
 // Test if the value in the <src> register equals the accumulator.
 void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::Equal(isolate_), assembler);
+  DoBinaryOp<EqualStub>(assembler);
 }
 
-
 // TestNotEqual <src>
 //
 // Test if the value in the <src> register is not equal to the accumulator.
 void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::NotEqual(isolate_), assembler);
+  DoBinaryOp<NotEqualStub>(assembler);
 }
 
-
 // TestEqualStrict <src>
 //
 // Test if the value in the <src> register is strictly equal to the accumulator.
 void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::StrictEqual(isolate_), assembler);
+  DoBinaryOp<StrictEqualStub>(assembler);
 }
 
-
 // TestLessThan <src>
 //
 // Test if the value in the <src> register is less than the accumulator.
 void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::LessThan(isolate_), assembler);
+  DoBinaryOp<LessThanStub>(assembler);
 }
 
-
 // TestGreaterThan <src>
 //
 // Test if the value in the <src> register is greater than the accumulator.
 void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::GreaterThan(isolate_), assembler);
+  DoBinaryOp<GreaterThanStub>(assembler);
 }
 
-
 // TestLessThanOrEqual <src>
 //
 // Test if the value in the <src> register is less than or equal to the
 // accumulator.
 void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::LessThanOrEqual(isolate_), assembler);
+  DoBinaryOp<LessThanOrEqualStub>(assembler);
 }
 
-
 // TestGreaterThanOrEqual <src>
 //
 // Test if the value in the <src> register is greater than or equal to the
 // accumulator.
 void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::GreaterThanOrEqual(isolate_), assembler);
+  DoBinaryOp<GreaterThanOrEqualStub>(assembler);
 }
 
-
 // TestIn <src>
 //
 // Test if the object referenced by the register operand is a property of the
 // object referenced by the accumulator.
 void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::HasProperty(isolate_), assembler);
+  DoBinaryOp<HasPropertyStub>(assembler);
 }
 
-
 // TestInstanceOf <src>
 //
 // Test if the object referenced by the <src> register is an an instance of type
 // referenced by the accumulator.
 void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::InstanceOf(isolate_), assembler);
-}
-
-void Interpreter::DoTypeConversionOp(Callable callable,
-                                     InterpreterAssembler* assembler) {
-  Node* target = __ HeapConstant(callable.code());
-  Node* accumulator = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* result =
-      __ CallStub(callable.descriptor(), target, context, accumulator);
-  __ SetAccumulator(result);
-  __ Dispatch();
-}
-
-// ToName
-//
-// Cast the object referenced by the accumulator to a name.
-void Interpreter::DoToName(InterpreterAssembler* assembler) {
-  DoTypeConversionOp(CodeFactory::ToName(isolate_), assembler);
-}
-
-
-// ToNumber
-//
-// Cast the object referenced by the accumulator to a number.
-void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
-  DoTypeConversionOp(CodeFactory::ToNumber(isolate_), assembler);
-}
-
-
-// ToObject
-//
-// Cast the object referenced by the accumulator to a JSObject.
-void Interpreter::DoToObject(InterpreterAssembler* assembler) {
-  DoTypeConversionOp(CodeFactory::ToObject(isolate_), assembler);
+  DoBinaryOp<InstanceOfStub>(assembler);
 }
 
 // Jump <imm>
@@ -1289,12 +1273,8 @@
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is true when the object is cast to boolean.
 void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
-  Callable callable = CodeFactory::ToBoolean(isolate_);
-  Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* to_boolean_value =
-      __ CallStub(callable.descriptor(), target, context, accumulator);
+  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
   Node* relative_jump = __ BytecodeOperandImm(0);
   Node* true_value = __ BooleanConstant(true);
   __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
@@ -1307,12 +1287,8 @@
 // to boolean.
 void Interpreter::DoJumpIfToBooleanTrueConstant(
     InterpreterAssembler* assembler) {
-  Callable callable = CodeFactory::ToBoolean(isolate_);
-  Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* to_boolean_value =
-      __ CallStub(callable.descriptor(), target, context, accumulator);
+  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
   Node* relative_jump = __ SmiUntag(constant);
@@ -1325,12 +1301,8 @@
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is false when the object is cast to boolean.
 void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
-  Callable callable = CodeFactory::ToBoolean(isolate_);
-  Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* to_boolean_value =
-      __ CallStub(callable.descriptor(), target, context, accumulator);
+  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
   Node* relative_jump = __ BytecodeOperandImm(0);
   Node* false_value = __ BooleanConstant(false);
   __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
@@ -1343,12 +1315,8 @@
 // to boolean.
 void Interpreter::DoJumpIfToBooleanFalseConstant(
     InterpreterAssembler* assembler) {
-  Callable callable = CodeFactory::ToBoolean(isolate_);
-  Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* to_boolean_value =
-      __ CallStub(callable.descriptor(), target, context, accumulator);
+  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
   Node* relative_jump = __ SmiUntag(constant);
@@ -1579,7 +1547,6 @@
   }
 }
 
-
 // CreateUnmappedArguments
 //
 // Creates a new unmapped arguments object.
@@ -1639,7 +1606,6 @@
   __ Abort(kUnexpectedReturnFromThrow);
 }
 
-
 // ReThrow
 //
 // Re-throws the exception in the accumulator.
@@ -1651,7 +1617,6 @@
   __ Abort(kUnexpectedReturnFromThrow);
 }
 
-
 // Return
 //
 // Return the value in the accumulator.
@@ -1821,11 +1786,23 @@
 // SuspendGenerator <generator>
 //
 // Exports the register file and stores it into the generator.  Also stores the
-// current context and the state given in the accumulator into the generator.
+// current context, the state given in the accumulator, and the current bytecode
+// offset (for debugging purposes) into the generator.
 void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) {
   Node* generator_reg = __ BytecodeOperandReg(0);
   Node* generator = __ LoadRegister(generator_reg);
 
+  Label if_stepping(assembler, Label::kDeferred), ok(assembler);
+  Node* step_action_address = __ ExternalConstant(
+      ExternalReference::debug_last_step_action_address(isolate_));
+  Node* step_action = __ Load(MachineType::Int8(), step_action_address);
+  STATIC_ASSERT(StepIn > StepNext);
+  STATIC_ASSERT(StepFrame > StepNext);
+  STATIC_ASSERT(LastStepAction == StepFrame);
+  Node* step_next = __ Int32Constant(StepNext);
+  __ BranchIfInt32LessThanOrEqual(step_next, step_action, &if_stepping, &ok);
+  __ Bind(&ok);
+
   Node* array =
       __ LoadObjectField(generator, JSGeneratorObject::kOperandStackOffset);
   Node* context = __ GetContext();
@@ -1835,7 +1812,18 @@
   __ StoreObjectField(generator, JSGeneratorObject::kContextOffset, context);
   __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, state);
 
+  Node* offset = __ SmiTag(__ BytecodeOffset());
+  __ StoreObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset,
+                      offset);
+
   __ Dispatch();
+
+  __ Bind(&if_stepping);
+  {
+    Node* context = __ GetContext();
+    __ CallRuntime(Runtime::kDebugRecordAsyncFunction, context, generator);
+    __ Goto(&ok);
+  }
 }
 
 // ResumeGenerator <generator>
diff --git a/src/interpreter/interpreter.h b/src/interpreter/interpreter.h
index d774d8b..468486c 100644
--- a/src/interpreter/interpreter.h
+++ b/src/interpreter/interpreter.h
@@ -70,17 +70,13 @@
   BYTECODE_LIST(DECLARE_BYTECODE_HANDLER_GENERATOR)
 #undef DECLARE_BYTECODE_HANDLER_GENERATOR
 
-  // Generates code to perform the binary operation via |callable|.
-  void DoBinaryOp(Callable callable, InterpreterAssembler* assembler);
-
-  // Generates code to perform the binary operation via |function_id|.
-  void DoBinaryOp(Runtime::FunctionId function_id,
-                  InterpreterAssembler* assembler);
-
   // Generates code to perform the binary operation via |Generator|.
   template <class Generator>
   void DoBinaryOp(InterpreterAssembler* assembler);
 
+  // Generates code to perform the unary operation via |callable|.
+  void DoUnaryOp(Callable callable, InterpreterAssembler* assembler);
+
   // Generates code to perform the unary operation via |Generator|.
   template <class Generator>
   void DoUnaryOp(InterpreterAssembler* assembler);
@@ -89,22 +85,10 @@
   // |compare_op|.
   void DoCompareOp(Token::Value compare_op, InterpreterAssembler* assembler);
 
-  // Generates code to load a constant from the constant pool.
-  void DoLoadConstant(InterpreterAssembler* assembler);
-
-  // Generates code to perform a global load via |ic|.
-  void DoLoadGlobal(Callable ic, InterpreterAssembler* assembler);
-
   // Generates code to perform a global store via |ic|.
-  void DoStoreGlobal(Callable ic, InterpreterAssembler* assembler);
+  void DoStaGlobal(Callable ic, InterpreterAssembler* assembler);
 
-  // Generates code to perform a named property load via |ic|.
-  void DoLoadIC(Callable ic, InterpreterAssembler* assembler);
-
-  // Generates code to perform a keyed property load via |ic|.
-  void DoKeyedLoadIC(Callable ic, InterpreterAssembler* assembler);
-
-  // Generates code to perform a namedproperty store via |ic|.
+  // Generates code to perform a named property store via |ic|.
   void DoStoreIC(Callable ic, InterpreterAssembler* assembler);
 
   // Generates code to perform a keyed property store via |ic|.
@@ -125,23 +109,44 @@
   // Generates code to perform a constructor call.
   void DoCallConstruct(InterpreterAssembler* assembler);
 
-  // Generates code to perform a type conversion.
-  void DoTypeConversionOp(Callable callable, InterpreterAssembler* assembler);
-
-  // Generates code to perform logical-not on boolean |value|.
-  void DoLogicalNotOp(compiler::Node* value, InterpreterAssembler* assembler);
-
   // Generates code to perform delete via function_id.
   void DoDelete(Runtime::FunctionId function_id,
                 InterpreterAssembler* assembler);
 
   // Generates code to perform a lookup slot load via |function_id|.
-  void DoLoadLookupSlot(Runtime::FunctionId function_id,
-                        InterpreterAssembler* assembler);
+  void DoLdaLookupSlot(Runtime::FunctionId function_id,
+                       InterpreterAssembler* assembler);
 
   // Generates code to perform a lookup slot store depending on |language_mode|.
-  void DoStoreLookupSlot(LanguageMode language_mode,
-                         InterpreterAssembler* assembler);
+  void DoStaLookupSlot(LanguageMode language_mode,
+                       InterpreterAssembler* assembler);
+
+  // Generates a node with the undefined constant.
+  compiler::Node* BuildLoadUndefined(InterpreterAssembler* assembler);
+
+  // Generates code to load a context slot.
+  compiler::Node* BuildLoadContextSlot(InterpreterAssembler* assembler);
+
+  // Generates code to load a global.
+  compiler::Node* BuildLoadGlobal(Callable ic, InterpreterAssembler* assembler);
+
+  // Generates code to load a named property.
+  compiler::Node* BuildLoadNamedProperty(Callable ic,
+                                         InterpreterAssembler* assembler);
+
+  // Generates code to load a keyed property.
+  compiler::Node* BuildLoadKeyedProperty(Callable ic,
+                                         InterpreterAssembler* assembler);
+
+  // Generates code to perform logical-not on boolean |value| and returns the
+  // result.
+  compiler::Node* BuildLogicalNot(compiler::Node* value,
+                                  InterpreterAssembler* assembler);
+
+  // Generates code to convert |value| to a boolean and returns the
+  // result.
+  compiler::Node* BuildToBoolean(compiler::Node* value,
+                                 InterpreterAssembler* assembler);
 
   uintptr_t GetDispatchCounter(Bytecode from, Bytecode to) const;
 
diff --git a/src/interpreter/source-position-table.cc b/src/interpreter/source-position-table.cc
index 65bfa20..579c6c4 100644
--- a/src/interpreter/source-position-table.cc
+++ b/src/interpreter/source-position-table.cc
@@ -23,19 +23,13 @@
 // - we record the difference from the previous position,
 // - we just stuff one bit for the type into the bytecode offset,
 // - we write least-significant bits first,
-// - negative numbers occur only rarely, so we use a denormalized
-//   most-significant byte (a byte with all zeros, which normally wouldn't
-//   make any sense) to encode a negative sign, so that we 'pay' nothing for
-//   positive numbers, but have to pay a full byte for negative integers.
+// - we use zig-zag encoding to encode both positive and negative numbers.
 
 namespace {
 
-// A zero-value in the most-significant byte is used to mark negative numbers.
-const int kNegativeSignMarker = 0;
-
 // Each byte is encoded as MoreBit | ValueBits.
 class MoreBit : public BitField8<bool, 7, 1> {};
-class ValueBits : public BitField8<int, 0, 7> {};
+class ValueBits : public BitField8<unsigned, 0, 7> {};
 
 // Helper: Add the offsets from 'other' to 'value'. Also set is_statement.
 void AddAndSetEntry(PositionTableEntry& value,
@@ -54,62 +48,57 @@
 
 // Helper: Encode an integer.
 void EncodeInt(ZoneVector<byte>& bytes, int value) {
-  bool sign = false;
-  if (value < 0) {
-    sign = true;
-    value = -value;
-  }
-
+  // Zig-zag encoding.
+  static const int kShift = kIntSize * kBitsPerByte - 1;
+  value = ((value << 1) ^ (value >> kShift));
+  DCHECK_GE(value, 0);
+  unsigned int encoded = static_cast<unsigned int>(value);
   bool more;
   do {
-    more = value > ValueBits::kMax;
-    bytes.push_back(MoreBit::encode(more || sign) |
-                    ValueBits::encode(value & ValueBits::kMax));
-    value >>= ValueBits::kSize;
+    more = encoded > ValueBits::kMax;
+    bytes.push_back(MoreBit::encode(more) |
+                    ValueBits::encode(encoded & ValueBits::kMask));
+    encoded >>= ValueBits::kSize;
   } while (more);
-
-  if (sign) {
-    bytes.push_back(MoreBit::encode(false) |
-                    ValueBits::encode(kNegativeSignMarker));
-  }
 }
 
 // Encode a PositionTableEntry.
 void EncodeEntry(ZoneVector<byte>& bytes, const PositionTableEntry& entry) {
-  // 1 bit for sign + is_statement each, which leaves 30b for the value.
-  DCHECK(abs(entry.bytecode_offset) < (1 << 30));
-  EncodeInt(bytes, (entry.is_statement ? 1 : 0) | (entry.bytecode_offset << 1));
+  // We only accept ascending bytecode offsets.
+  DCHECK(entry.bytecode_offset >= 0);
+  // Since bytecode_offset is not negative, we use sign to encode is_statement.
+  EncodeInt(bytes, entry.is_statement ? entry.bytecode_offset
+                                      : -entry.bytecode_offset - 1);
   EncodeInt(bytes, entry.source_position);
 }
 
 // Helper: Decode an integer.
 void DecodeInt(ByteArray* bytes, int* index, int* v) {
   byte current;
-  int n = 0;
-  int value = 0;
+  int shift = 0;
+  int decoded = 0;
   bool more;
   do {
     current = bytes->get((*index)++);
-    value |= ValueBits::decode(current) << (n * ValueBits::kSize);
-    n++;
+    decoded |= ValueBits::decode(current) << shift;
     more = MoreBit::decode(current);
+    shift += ValueBits::kSize;
   } while (more);
-
-  if (ValueBits::decode(current) == kNegativeSignMarker) {
-    value = -value;
-  }
-  *v = value;
+  DCHECK_GE(decoded, 0);
+  decoded = (decoded >> 1) ^ (-(decoded & 1));
+  *v = decoded;
 }
 
 void DecodeEntry(ByteArray* bytes, int* index, PositionTableEntry* entry) {
   int tmp;
   DecodeInt(bytes, index, &tmp);
-  entry->is_statement = (tmp & 1);
-
-  // Note that '>>' needs to be arithmetic shift in order to handle negative
-  // numbers properly.
-  entry->bytecode_offset = (tmp >> 1);
-
+  if (tmp >= 0) {
+    entry->is_statement = true;
+    entry->bytecode_offset = tmp;
+  } else {
+    entry->is_statement = false;
+    entry->bytecode_offset = -(tmp + 1);
+  }
   DecodeInt(bytes, index, &entry->source_position);
 }
 
diff --git a/src/isolate-inl.h b/src/isolate-inl.h
index 48ea0aa..46f29b6 100644
--- a/src/isolate-inl.h
+++ b/src/isolate-inl.h
@@ -20,26 +20,26 @@
 
 Object* Isolate::pending_exception() {
   DCHECK(has_pending_exception());
-  DCHECK(!thread_local_top_.pending_exception_->IsException());
+  DCHECK(!thread_local_top_.pending_exception_->IsException(this));
   return thread_local_top_.pending_exception_;
 }
 
 
 void Isolate::set_pending_exception(Object* exception_obj) {
-  DCHECK(!exception_obj->IsException());
+  DCHECK(!exception_obj->IsException(this));
   thread_local_top_.pending_exception_ = exception_obj;
 }
 
 
 void Isolate::clear_pending_exception() {
-  DCHECK(!thread_local_top_.pending_exception_->IsException());
+  DCHECK(!thread_local_top_.pending_exception_->IsException(this));
   thread_local_top_.pending_exception_ = heap_.the_hole_value();
 }
 
 
 bool Isolate::has_pending_exception() {
-  DCHECK(!thread_local_top_.pending_exception_->IsException());
-  return !thread_local_top_.pending_exception_->IsTheHole();
+  DCHECK(!thread_local_top_.pending_exception_->IsException(this));
+  return !thread_local_top_.pending_exception_->IsTheHole(this);
 }
 
 
@@ -50,19 +50,19 @@
 
 Object* Isolate::scheduled_exception() {
   DCHECK(has_scheduled_exception());
-  DCHECK(!thread_local_top_.scheduled_exception_->IsException());
+  DCHECK(!thread_local_top_.scheduled_exception_->IsException(this));
   return thread_local_top_.scheduled_exception_;
 }
 
 
 bool Isolate::has_scheduled_exception() {
-  DCHECK(!thread_local_top_.scheduled_exception_->IsException());
+  DCHECK(!thread_local_top_.scheduled_exception_->IsException(this));
   return thread_local_top_.scheduled_exception_ != heap_.the_hole_value();
 }
 
 
 void Isolate::clear_scheduled_exception() {
-  DCHECK(!thread_local_top_.scheduled_exception_->IsException());
+  DCHECK(!thread_local_top_.scheduled_exception_->IsException(this));
   thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
 }
 
@@ -102,7 +102,6 @@
 #undef NATIVE_CONTEXT_FIELD_ACCESSOR
 
 bool Isolate::IsArraySpeciesLookupChainIntact() {
-  if (!FLAG_harmony_species) return true;
   // Note: It would be nice to have debug checks to make sure that the
   // species protector is accurate, but this would be hard to do for most of
   // what the protector stands for:
@@ -121,7 +120,6 @@
 }
 
 bool Isolate::IsHasInstanceLookupChainIntact() {
-  if (!FLAG_harmony_instanceof) return true;
   PropertyCell* has_instance_cell = heap()->has_instance_protector();
   return has_instance_cell->value() == Smi::FromInt(kArrayProtectorValid);
 }
diff --git a/src/isolate.cc b/src/isolate.cc
index 9d35113..0075a41 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -27,10 +27,10 @@
 #include "src/ic/stub-cache.h"
 #include "src/interpreter/interpreter.h"
 #include "src/isolate-inl.h"
+#include "src/libsampler/v8-sampler.h"
 #include "src/log.h"
 #include "src/messages.h"
 #include "src/profiler/cpu-profiler.h"
-#include "src/profiler/sampler.h"
 #include "src/prototype.h"
 #include "src/regexp/regexp-stack.h"
 #include "src/runtime-profiler.h"
@@ -39,7 +39,7 @@
 #include "src/v8.h"
 #include "src/version.h"
 #include "src/vm-state-inl.h"
-
+#include "src/wasm/wasm-module.h"
 
 namespace v8 {
 namespace internal {
@@ -386,7 +386,8 @@
     switch (frame->type()) {
       case StackFrame::JAVA_SCRIPT:
       case StackFrame::OPTIMIZED:
-      case StackFrame::INTERPRETED: {
+      case StackFrame::INTERPRETED:
+      case StackFrame::BUILTIN: {
         JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
         // Set initial size to the maximum inlining level + 1 for the outermost
         // function.
@@ -551,28 +552,17 @@
     Handle<Script> script(Script::cast(fun->shared()->script()));
 
     if (!line_key_.is_null()) {
-      int script_line_offset = script->line_offset();
-      int line_number = Script::GetLineNumber(script, position);
-      // line_number is already shifted by the script_line_offset.
-      int relative_line_number = line_number - script_line_offset;
-      if (!column_key_.is_null() && relative_line_number >= 0) {
-        Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
-        int start =
-            (relative_line_number == 0)
-                ? 0
-                : Smi::cast(line_ends->get(relative_line_number - 1))->value() +
-                      1;
-        int column_offset = position - start;
-        if (relative_line_number == 0) {
-          // For the case where the code is on the same line as the script tag.
-          column_offset += script->column_offset();
-        }
+      Script::PositionInfo info;
+      bool valid_pos =
+          script->GetPositionInfo(position, &info, Script::WITH_OFFSET);
+
+      if (!column_key_.is_null() && valid_pos) {
         JSObject::AddProperty(stack_frame, column_key_,
-                              handle(Smi::FromInt(column_offset + 1), isolate_),
+                              handle(Smi::FromInt(info.column + 1), isolate_),
                               NONE);
       }
       JSObject::AddProperty(stack_frame, line_key_,
-                            handle(Smi::FromInt(line_number + 1), isolate_),
+                            handle(Smi::FromInt(info.line + 1), isolate_),
                             NONE);
     }
 
@@ -616,11 +606,10 @@
         factory()->NewJSObject(isolate_->object_function());
 
     if (!function_key_.is_null()) {
-      Handle<Object> fun_name = handle(frame->function_name(), isolate_);
-      if (fun_name->IsUndefined())
-        fun_name = isolate_->factory()->InternalizeUtf8String(
-            Vector<const char>("<WASM>"));
-      JSObject::AddProperty(stack_frame, function_key_, fun_name, NONE);
+      Handle<String> name = wasm::GetWasmFunctionName(
+          isolate_, handle(frame->wasm_obj(), isolate_),
+          frame->function_index());
+      JSObject::AddProperty(stack_frame, function_key_, name, NONE);
     }
     // Encode the function index as line number.
     if (!line_key_.is_null()) {
@@ -633,6 +622,8 @@
       Code* code = frame->LookupCode();
       int offset = static_cast<int>(frame->pc() - code->instruction_start());
       int position = code->SourcePosition(offset);
+      // Make position 1-based.
+      if (position >= 0) ++position;
       JSObject::AddProperty(stack_frame, column_key_,
                             isolate_->factory()->NewNumberFromInt(position),
                             NONE);
@@ -811,21 +802,6 @@
 }
 
 
-static inline AccessCheckInfo* GetAccessCheckInfo(Isolate* isolate,
-                                                  Handle<JSObject> receiver) {
-  Object* maybe_constructor = receiver->map()->GetConstructor();
-  if (!maybe_constructor->IsJSFunction()) return NULL;
-  JSFunction* constructor = JSFunction::cast(maybe_constructor);
-  if (!constructor->shared()->IsApiFunction()) return NULL;
-
-  Object* data_obj =
-     constructor->shared()->get_api_func_data()->access_check_info();
-  if (data_obj == isolate->heap()->undefined_value()) return NULL;
-
-  return AccessCheckInfo::cast(data_obj);
-}
-
-
 void Isolate::ReportFailedAccessCheck(Handle<JSObject> receiver) {
   if (!thread_local_top()->failed_access_check_callback_) {
     return ScheduleThrow(*factory()->NewTypeError(MessageTemplate::kNoAccess));
@@ -838,7 +814,7 @@
   HandleScope scope(this);
   Handle<Object> data;
   { DisallowHeapAllocation no_gc;
-    AccessCheckInfo* access_check_info = GetAccessCheckInfo(this, receiver);
+    AccessCheckInfo* access_check_info = AccessCheckInfo::Get(this, receiver);
     if (!access_check_info) {
       AllowHeapAllocation doesnt_matter_anymore;
       return ScheduleThrow(
@@ -886,18 +862,12 @@
   HandleScope scope(this);
   Handle<Object> data;
   v8::AccessCheckCallback callback = nullptr;
-  v8::NamedSecurityCallback named_callback = nullptr;
   { DisallowHeapAllocation no_gc;
-    AccessCheckInfo* access_check_info = GetAccessCheckInfo(this, receiver);
+    AccessCheckInfo* access_check_info = AccessCheckInfo::Get(this, receiver);
     if (!access_check_info) return false;
     Object* fun_obj = access_check_info->callback();
     callback = v8::ToCData<v8::AccessCheckCallback>(fun_obj);
     data = handle(access_check_info->data(), this);
-    if (!callback) {
-      fun_obj = access_check_info->named_callback();
-      named_callback = v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
-      if (!named_callback) return false;
-    }
   }
 
   LOG(this, ApiSecurityCheck());
@@ -905,21 +875,12 @@
   {
     // Leaving JavaScript.
     VMState<EXTERNAL> state(this);
-    if (callback) {
-      return callback(v8::Utils::ToLocal(accessing_context),
-                      v8::Utils::ToLocal(receiver), v8::Utils::ToLocal(data));
-    }
-    Handle<Object> key = factory()->undefined_value();
-    return named_callback(v8::Utils::ToLocal(receiver), v8::Utils::ToLocal(key),
-                          v8::ACCESS_HAS, v8::Utils::ToLocal(data));
+    return callback(v8::Utils::ToLocal(accessing_context),
+                    v8::Utils::ToLocal(receiver), v8::Utils::ToLocal(data));
   }
 }
 
 
-const char* const Isolate::kStackOverflowMessage =
-  "Uncaught RangeError: Maximum call stack size exceeded";
-
-
 Object* Isolate::StackOverflow() {
   HandleScope scope(this);
   // At this point we cannot create an Error object using its javascript
@@ -1328,7 +1289,7 @@
   DCHECK(handler->rethrow_);
   DCHECK(handler->capture_message_);
   Object* message = reinterpret_cast<Object*>(handler->message_obj_);
-  DCHECK(message->IsJSMessageObject() || message->IsTheHole());
+  DCHECK(message->IsJSMessageObject() || message->IsTheHole(this));
   thread_local_top()->pending_message_obj_ = message;
 }
 
@@ -1397,7 +1358,8 @@
   if (!frame->is_java_script()) return false;
   JSFunction* fun = JavaScriptFrame::cast(frame)->function();
   Object* script = fun->shared()->script();
-  if (!script->IsScript() || (Script::cast(script)->source()->IsUndefined())) {
+  if (!script->IsScript() ||
+      (Script::cast(script)->source()->IsUndefined(this))) {
     return false;
   }
   Handle<Script> casted_script(Script::cast(script));
@@ -1462,7 +1424,7 @@
 
     Object* script = fun->shared()->script();
     if (script->IsScript() &&
-        !(Script::cast(script)->source()->IsUndefined())) {
+        !(Script::cast(script)->source()->IsUndefined(this))) {
       int pos = PositionFromStackTrace(elements, i);
       Handle<Script> casted_script(Script::cast(script));
       *target = MessageLocation(casted_script, pos, pos + 1);
@@ -1477,7 +1439,7 @@
                                                MessageLocation* location) {
   Handle<JSArray> stack_trace_object;
   if (capture_stack_trace_for_uncaught_exceptions_) {
-    if (Object::IsErrorObject(this, exception)) {
+    if (exception->IsJSError()) {
       // We fetch the stack trace that corresponds to this error object.
       // If the lookup fails, the exception is probably not a valid Error
       // object. In that case, we fall through and capture the stack trace
@@ -1586,7 +1548,7 @@
   }
 
   // Actually report the pending message to all message handlers.
-  if (!message_obj->IsTheHole() && should_report_exception) {
+  if (!message_obj->IsTheHole(this) && should_report_exception) {
     HandleScope scope(this);
     Handle<JSMessageObject> message(JSMessageObject::cast(message_obj));
     Handle<JSValue> script_wrapper(JSValue::cast(message->script()));
@@ -1603,7 +1565,7 @@
   DCHECK(has_pending_exception());
 
   if (thread_local_top_.pending_exception_ != heap()->termination_exception() &&
-      !thread_local_top_.pending_message_obj_->IsTheHole()) {
+      !thread_local_top_.pending_message_obj_->IsTheHole(this)) {
     Handle<JSMessageObject> message_obj(
         JSMessageObject::cast(thread_local_top_.pending_message_obj_));
     Handle<JSValue> script_wrapper(JSValue::cast(message_obj->script()));
@@ -1877,12 +1839,14 @@
       // TODO(bmeurer) Initialized lazily because it depends on flags; can
       // be fixed once the default isolate cleanup is done.
       random_number_generator_(NULL),
+      rail_mode_(PERFORMANCE_DEFAULT),
       serializer_enabled_(enable_serializer),
       has_fatal_error_(false),
       initialized_from_snapshot_(false),
       is_tail_call_elimination_enabled_(true),
       cpu_profiler_(NULL),
       heap_profiler_(NULL),
+      code_event_dispatcher_(new CodeEventDispatcher()),
       function_entry_hook_(NULL),
       deferred_handles_head_(NULL),
       optimizing_compile_dispatcher_(NULL),
@@ -2010,7 +1974,7 @@
   }
 
   // We must stop the logger before we tear down other components.
-  Sampler* sampler = logger_->sampler();
+  sampler::Sampler* sampler = logger_->sampler();
   if (sampler && sampler->IsActive()) sampler->Stop();
 
   delete deoptimizer_data_;
@@ -2040,6 +2004,8 @@
   delete cpu_profiler_;
   cpu_profiler_ = NULL;
 
+  code_event_dispatcher_.reset();
+
   delete root_index_map_;
   root_index_map_ = NULL;
 
@@ -2169,12 +2135,12 @@
   } else {
     v8::TryCatch* handler = try_catch_handler();
     DCHECK(thread_local_top_.pending_message_obj_->IsJSMessageObject() ||
-           thread_local_top_.pending_message_obj_->IsTheHole());
+           thread_local_top_.pending_message_obj_->IsTheHole(this));
     handler->can_continue_ = true;
     handler->has_terminated_ = false;
     handler->exception_ = pending_exception();
     // Propagate to the external try-catch only if we got an actual message.
-    if (thread_local_top_.pending_message_obj_->IsTheHole()) return true;
+    if (thread_local_top_.pending_message_obj_->IsTheHole(this)) return true;
 
     handler->message_obj_ = thread_local_top_.pending_message_obj_;
   }
@@ -2331,13 +2297,10 @@
            Internals::kIsolateEmbedderDataOffset);
   CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.roots_)),
            Internals::kIsolateRootsOffset);
-  CHECK_EQ(static_cast<int>(
-               OFFSET_OF(Isolate, heap_.amount_of_external_allocated_memory_)),
-           Internals::kAmountOfExternalAllocatedMemoryOffset);
-  CHECK_EQ(static_cast<int>(OFFSET_OF(
-               Isolate,
-               heap_.amount_of_external_allocated_memory_at_last_global_gc_)),
-           Internals::kAmountOfExternalAllocatedMemoryAtLastGlobalGCOffset);
+  CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.external_memory_)),
+           Internals::kExternalMemoryOffset);
+  CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.external_memory_limit_)),
+           Internals::kExternalMemoryLimitOffset);
 
   time_millis_at_init_ = heap_.MonotonicallyIncreasingTimeInMs();
 
@@ -2471,8 +2434,17 @@
 
 void Isolate::DumpAndResetCompilationStats() {
   if (turbo_statistics() != nullptr) {
+    DCHECK(FLAG_turbo_stats || FLAG_turbo_stats_nvp);
+
     OFStream os(stdout);
-    os << *turbo_statistics() << std::endl;
+    if (FLAG_turbo_stats) {
+      AsPrintableStatistics ps = {*turbo_statistics(), false};
+      os << ps << std::endl;
+    }
+    if (FLAG_turbo_stats_nvp) {
+      AsPrintableStatistics ps = {*turbo_statistics(), true};
+      os << ps << std::endl;
+    }
   }
   if (hstatistics() != nullptr) hstatistics()->Print();
   delete turbo_statistics_;
@@ -2516,7 +2488,7 @@
     DisallowHeapAllocation no_gc;
     Object* const initial_js_array_map =
         context()->native_context()->get(Context::ArrayMapIndex(kind));
-    if (!initial_js_array_map->IsUndefined()) {
+    if (!initial_js_array_map->IsUndefined(this)) {
       return Map::cast(initial_js_array_map);
     }
   }
@@ -2532,7 +2504,7 @@
 
 bool Isolate::IsArrayOrObjectPrototype(Object* object) {
   Object* context = heap()->native_contexts_list();
-  while (context != heap()->undefined_value()) {
+  while (!context->IsUndefined(this)) {
     Context* current_context = Context::cast(context);
     if (current_context->initial_object_prototype() == object ||
         current_context->initial_array_prototype() == object) {
@@ -2546,7 +2518,7 @@
 bool Isolate::IsInAnyContext(Object* object, uint32_t index) {
   DisallowHeapAllocation no_gc;
   Object* context = heap()->native_contexts_list();
-  while (context != heap()->undefined_value()) {
+  while (!context->IsUndefined(this)) {
     Context* current_context = Context::cast(context);
     if (current_context->get(index) == object) {
       return true;
@@ -2630,7 +2602,7 @@
   Handle<Symbol> key = factory()->is_concat_spreadable_symbol();
   Handle<Object> value;
   LookupIterator it(array_prototype, key);
-  if (it.IsFound() && !JSReceiver::GetDataProperty(&it)->IsUndefined()) {
+  if (it.IsFound() && !JSReceiver::GetDataProperty(&it)->IsUndefined(this)) {
     // TODO(cbruni): Currently we do not revert if we unset the
     // @@isConcatSpreadable property on Array.prototype or Object.prototype
     // hence the reverse implication doesn't hold.
@@ -2670,7 +2642,6 @@
 }
 
 void Isolate::InvalidateArraySpeciesProtector() {
-  if (!FLAG_harmony_species) return;
   DCHECK(factory()->species_protector()->value()->IsSmi());
   DCHECK(IsArraySpeciesLookupChainIntact());
   factory()->species_protector()->set_value(
@@ -2786,16 +2757,19 @@
 
 
 void Isolate::FireCallCompletedCallback() {
-  bool has_call_completed_callbacks = !call_completed_callbacks_.is_empty();
+  if (!handle_scope_implementer()->CallDepthIsZero()) return;
+
   bool run_microtasks =
       pending_microtask_count() &&
       !handle_scope_implementer()->HasMicrotasksSuppressions() &&
       handle_scope_implementer()->microtasks_policy() ==
           v8::MicrotasksPolicy::kAuto;
-  if (!has_call_completed_callbacks && !run_microtasks) return;
 
-  if (!handle_scope_implementer()->CallDepthIsZero()) return;
   if (run_microtasks) RunMicrotasks();
+  // Prevent stepping from spilling into the next call made by the embedder.
+  if (debug()->is_active()) debug()->ClearStepping();
+
+  if (call_completed_callbacks_.is_empty()) return;
   // Fire callbacks.  Increase call depth to prevent recursive callbacks.
   v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this);
   v8::Isolate::SuppressMicrotaskExecutionScope suppress(isolate);
@@ -2836,7 +2810,7 @@
     queue = factory()->CopyFixedArrayAndGrow(queue, num_tasks);
     heap()->set_microtask_queue(*queue);
   }
-  DCHECK(queue->get(num_tasks)->IsUndefined());
+  DCHECK(queue->get(num_tasks)->IsUndefined(this));
   queue->set(num_tasks, *microtask);
   set_pending_microtask_count(num_tasks + 1);
 }
@@ -3020,6 +2994,12 @@
   }
 }
 
+void Isolate::SetRAILMode(RAILMode rail_mode) {
+  rail_mode_ = rail_mode;
+  if (FLAG_trace_rail) {
+    PrintIsolate(this, "RAIL mode: %s\n", RAILModeName(rail_mode_));
+  }
+}
 
 bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
   StackGuard* stack_guard = isolate_->stack_guard();
diff --git a/src/isolate.h b/src/isolate.h
index 5895ebb..4ca842e 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -5,6 +5,7 @@
 #ifndef V8_ISOLATE_H_
 #define V8_ISOLATE_H_
 
+#include <memory>
 #include <queue>
 #include <set>
 
@@ -13,6 +14,7 @@
 #include "src/assert-scope.h"
 #include "src/base/accounting-allocator.h"
 #include "src/base/atomicops.h"
+#include "src/base/hashmap.h"
 #include "src/builtins.h"
 #include "src/cancelable-task.h"
 #include "src/contexts.h"
@@ -22,7 +24,6 @@
 #include "src/futex-emulation.h"
 #include "src/global-handles.h"
 #include "src/handles.h"
-#include "src/hashmap.h"
 #include "src/heap/heap.h"
 #include "src/messages.h"
 #include "src/optimizing-compile-dispatcher.h"
@@ -42,6 +43,8 @@
 class BasicBlockProfiler;
 class Bootstrapper;
 class CallInterfaceDescriptorData;
+class CodeAgingHelper;
+class CodeEventDispatcher;
 class CodeGenerator;
 class CodeRange;
 class CodeStubDescriptor;
@@ -66,7 +69,7 @@
 class InnerPointerToCodeCache;
 class Logger;
 class MaterializedObjectStore;
-class CodeAgingHelper;
+class PositionsRecorder;
 class RegExpStack;
 class SaveContext;
 class StatsTable;
@@ -124,6 +127,17 @@
 #define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \
   RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, MaybeHandle<T>())
 
+#define RETURN_RESULT_OR_FAILURE(isolate, call)     \
+  do {                                              \
+    Handle<Object> __result__;                      \
+    Isolate* __isolate__ = (isolate);               \
+    if (!(call).ToHandle(&__result__)) {            \
+      DCHECK(__isolate__->has_pending_exception()); \
+      return __isolate__->heap()->exception();      \
+    }                                               \
+    return *__result__;                             \
+  } while (false)
+
 #define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value)  \
   do {                                                               \
     if (!(call).ToHandle(&dst)) {                                    \
@@ -132,21 +146,26 @@
     }                                                                \
   } while (false)
 
-#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call)  \
-  ASSIGN_RETURN_ON_EXCEPTION_VALUE(                             \
-      isolate, dst, call, isolate->heap()->exception())
+#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call)          \
+  do {                                                                  \
+    Isolate* __isolate__ = (isolate);                                   \
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(__isolate__, dst, call,            \
+                                     __isolate__->heap()->exception()); \
+  } while (false)
 
 #define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T)  \
   ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())
 
-#define THROW_NEW_ERROR(isolate, call, T)               \
-  do {                                                  \
-    return isolate->Throw<T>(isolate->factory()->call); \
+#define THROW_NEW_ERROR(isolate, call, T)                       \
+  do {                                                          \
+    Isolate* __isolate__ = (isolate);                           \
+    return __isolate__->Throw<T>(__isolate__->factory()->call); \
   } while (false)
 
-#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call) \
-  do {                                                \
-    return isolate->Throw(*isolate->factory()->call); \
+#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call)         \
+  do {                                                        \
+    Isolate* __isolate__ = (isolate);                         \
+    return __isolate__->Throw(*__isolate__->factory()->call); \
   } while (false)
 
 #define RETURN_ON_EXCEPTION_VALUE(isolate, call, value)            \
@@ -157,8 +176,12 @@
     }                                                              \
   } while (false)
 
-#define RETURN_FAILURE_ON_EXCEPTION(isolate, call)  \
-  RETURN_ON_EXCEPTION_VALUE(isolate, call, isolate->heap()->exception())
+#define RETURN_FAILURE_ON_EXCEPTION(isolate, call)               \
+  do {                                                           \
+    Isolate* __isolate__ = (isolate);                            \
+    RETURN_ON_EXCEPTION_VALUE(__isolate__, call,                 \
+                              __isolate__->heap()->exception()); \
+  } while (false);
 
 #define RETURN_ON_EXCEPTION(isolate, call, T)  \
   RETURN_ON_EXCEPTION_VALUE(isolate, call, MaybeHandle<T>())
@@ -339,9 +362,9 @@
 
 #if USE_SIMULATOR
 
-#define ISOLATE_INIT_SIMULATOR_LIST(V)                                         \
-  V(bool, simulator_initialized, false)                                        \
-  V(HashMap*, simulator_i_cache, NULL)                                         \
+#define ISOLATE_INIT_SIMULATOR_LIST(V)       \
+  V(bool, simulator_initialized, false)      \
+  V(base::HashMap*, simulator_i_cache, NULL) \
   V(Redirection*, simulator_redirection, NULL)
 #else
 
@@ -352,10 +375,10 @@
 
 #ifdef DEBUG
 
-#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)                                       \
-  V(CommentStatistic, paged_space_comments_statistics,                         \
-      CommentStatistic::kMaxComments + 1)                                      \
-  V(int, code_kind_statistics, Code::NUMBER_OF_KINDS)
+#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)               \
+  V(CommentStatistic, paged_space_comments_statistics, \
+    CommentStatistic::kMaxComments + 1)                \
+  V(int, code_kind_statistics, AbstractCode::NUMBER_OF_KINDS)
 #else
 
 #define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
@@ -373,31 +396,34 @@
 
 typedef List<HeapObject*> DebugObjectCache;
 
-#define ISOLATE_INIT_LIST(V)                                                   \
-  /* Assembler state. */                                                       \
-  V(FatalErrorCallback, exception_behavior, NULL)                              \
-  V(LogEventCallback, event_logger, NULL)                                      \
-  V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL)     \
-  /* To distinguish the function templates, so that we can find them in the */ \
-  /* function cache of the native context. */                                  \
-  V(int, next_serial_number, 0)                                                \
-  V(ExternalReferenceRedirectorPointer*, external_reference_redirector, NULL)  \
-  /* State for Relocatable. */                                                 \
-  V(Relocatable*, relocatable_top, NULL)                                       \
-  V(DebugObjectCache*, string_stream_debug_object_cache, NULL)                 \
-  V(Object*, string_stream_current_security_token, NULL)                       \
-  V(ExternalReferenceTable*, external_reference_table, NULL)                   \
-  V(HashMap*, external_reference_map, NULL)                                    \
-  V(HashMap*, root_index_map, NULL)                                            \
-  V(int, pending_microtask_count, 0)                                           \
-  V(HStatistics*, hstatistics, NULL)                                           \
-  V(CompilationStatistics*, turbo_statistics, NULL)                            \
-  V(HTracer*, htracer, NULL)                                                   \
-  V(CodeTracer*, code_tracer, NULL)                                            \
-  V(bool, fp_stubs_generated, false)                                           \
-  V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu)                            \
-  V(PromiseRejectCallback, promise_reject_callback, NULL)                      \
-  V(const v8::StartupData*, snapshot_blob, NULL)                               \
+#define ISOLATE_INIT_LIST(V)                                                  \
+  /* Assembler state. */                                                      \
+  V(FatalErrorCallback, exception_behavior, nullptr)                          \
+  V(LogEventCallback, event_logger, nullptr)                                  \
+  V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
+  V(ExternalReferenceRedirectorPointer*, external_reference_redirector,       \
+    nullptr)                                                                  \
+  /* State for Relocatable. */                                                \
+  V(Relocatable*, relocatable_top, nullptr)                                   \
+  V(DebugObjectCache*, string_stream_debug_object_cache, nullptr)             \
+  V(Object*, string_stream_current_security_token, nullptr)                   \
+  V(ExternalReferenceTable*, external_reference_table, nullptr)               \
+  V(intptr_t*, api_external_references, nullptr)                              \
+  V(base::HashMap*, external_reference_map, nullptr)                          \
+  V(base::HashMap*, root_index_map, nullptr)                                  \
+  V(int, pending_microtask_count, 0)                                          \
+  V(HStatistics*, hstatistics, nullptr)                                       \
+  V(CompilationStatistics*, turbo_statistics, nullptr)                        \
+  V(HTracer*, htracer, nullptr)                                               \
+  V(CodeTracer*, code_tracer, nullptr)                                        \
+  V(bool, fp_stubs_generated, false)                                          \
+  V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu)                           \
+  V(PromiseRejectCallback, promise_reject_callback, nullptr)                  \
+  V(const v8::StartupData*, snapshot_blob, nullptr)                           \
+  V(int, code_and_metadata_size, 0)                                           \
+  V(int, bytecode_and_metadata_size, 0)                                       \
+  /* true if being profiled. Causes collection of extra compile info. */      \
+  V(bool, is_profiling, false)                                                \
   ISOLATE_INIT_SIMULATOR_LIST(V)
 
 #define THREAD_LOCAL_TOP_ACCESSOR(type, name)                        \
@@ -765,8 +791,6 @@
   char* ArchiveThread(char* to);
   char* RestoreThread(char* from);
 
-  static const char* const kStackOverflowMessage;
-
   static const int kUC16AlphabetSize = 256;  // See StringSearchBase.
   static const int kBMMaxShift = 250;        // See StringSearchBase.
 
@@ -894,6 +918,10 @@
 
   Debug* debug() { return debug_; }
 
+  bool* is_profiling_address() { return &is_profiling_; }
+  CodeEventDispatcher* code_event_dispatcher() const {
+    return code_event_dispatcher_.get();
+  }
   CpuProfiler* cpu_profiler() const { return cpu_profiler_; }
   HeapProfiler* heap_profiler() const { return heap_profiler_; }
 
@@ -1110,6 +1138,8 @@
 
   bool IsInAnyContext(Object* object, uint32_t index);
 
+  void SetRAILMode(RAILMode rail_mode);
+
  protected:
   explicit Isolate(bool enable_serializer);
   bool IsArrayOrObjectPrototype(Object* object);
@@ -1221,6 +1251,24 @@
 
   void RunMicrotasksInternal();
 
+  const char* RAILModeName(RAILMode rail_mode) const {
+    switch (rail_mode) {
+      case PERFORMANCE_DEFAULT:
+        return "DEFAULT";
+      case PERFORMANCE_RESPONSE:
+        return "RESPONSE";
+      case PERFORMANCE_ANIMATION:
+        return "ANIMATION";
+      case PERFORMANCE_IDLE:
+        return "IDLE";
+      case PERFORMANCE_LOAD:
+        return "LOAD";
+      default:
+        UNREACHABLE();
+    }
+    return "";
+  }
+
   base::Atomic32 id_;
   EntryStackItem* entry_stack_;
   int stack_trace_nesting_level_;
@@ -1267,6 +1315,7 @@
   DateCache* date_cache_;
   CallInterfaceDescriptorData* call_descriptor_data_;
   base::RandomNumberGenerator* random_number_generator_;
+  RAILMode rail_mode_;
 
   // Whether the isolate has been created for snapshotting.
   bool serializer_enabled_;
@@ -1292,6 +1341,7 @@
   Debug* debug_;
   CpuProfiler* cpu_profiler_;
   HeapProfiler* heap_profiler_;
+  std::unique_ptr<CodeEventDispatcher> code_event_dispatcher_;
   FunctionEntryHook function_entry_hook_;
 
   interpreter::Interpreter* interpreter_;
@@ -1373,6 +1423,7 @@
   friend class v8::Isolate;
   friend class v8::Locker;
   friend class v8::Unlocker;
+  friend class v8::SnapshotCreator;
   friend v8::StartupData v8::V8::CreateSnapshotDataBlob(const char*);
   friend v8::StartupData v8::V8::WarmUpSnapshotDataBlob(v8::StartupData,
                                                         const char*);
diff --git a/src/js/array-iterator.js b/src/js/array-iterator.js
index b3e25e9..8203f1f 100644
--- a/src/js/array-iterator.js
+++ b/src/js/array-iterator.js
@@ -22,7 +22,7 @@
 var iteratorSymbol = utils.ImportNow("iterator_symbol");
 var MakeTypeError;
 var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-var GlobalTypedArray = global.Uint8Array.__proto__;
+var GlobalTypedArray = %object_get_prototype_of(global.Uint8Array);
 
 utils.Import(function(from) {
   MakeTypeError = from.MakeTypeError;
diff --git a/src/js/array.js b/src/js/array.js
index 0a77b23..c29b8f7 100644
--- a/src/js/array.js
+++ b/src/js/array.js
@@ -11,7 +11,6 @@
 // -------------------------------------------------------------------
 // Imports
 
-var FLAG_harmony_species;
 var GetIterator;
 var GetMethod;
 var GlobalArray = global.Array;
@@ -23,6 +22,7 @@
 var ObjectHasOwnProperty;
 var ObjectToString = utils.ImportNow("object_to_string");
 var iteratorSymbol = utils.ImportNow("iterator_symbol");
+var speciesSymbol = utils.ImportNow("species_symbol");
 var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
 
 utils.Import(function(from) {
@@ -34,23 +34,12 @@
   ObjectHasOwnProperty = from.ObjectHasOwnProperty;
 });
 
-utils.ImportFromExperimental(function(from) {
-  FLAG_harmony_species = from.FLAG_harmony_species;
-});
-
 // -------------------------------------------------------------------
 
 
 function ArraySpeciesCreate(array, length) {
-  var constructor;
-
   length = INVERT_NEG_ZERO(length);
-
-  if (FLAG_harmony_species) {
-    constructor = %ArraySpeciesConstructor(array);
-  } else {
-    constructor = GlobalArray;
-  }
+  var constructor = %ArraySpeciesConstructor(array);
   return new constructor(length);
 }
 
@@ -328,10 +317,9 @@
 // because the receiver is not an array (so we have no choice) or because we
 // know we are not deleting or moving a lot of elements.
 function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
-  var is_array = IS_ARRAY(array);
   for (var i = 0; i < del_count; i++) {
     var index = start_i + i;
-    if (HAS_INDEX(array, index, is_array)) {
+    if (index in array) {
       var current = array[index];
       %CreateDataProperty(deleted_elements, i, current);
     }
@@ -340,7 +328,6 @@
 
 
 function SimpleMove(array, start_i, del_count, len, num_additional_args) {
-  var is_array = IS_ARRAY(array);
   if (num_additional_args !== del_count) {
     // Move the existing elements after the elements to be deleted
     // to the right position in the resulting array.
@@ -348,7 +335,7 @@
       for (var i = len - del_count; i > start_i; i--) {
         var from_index = i + del_count - 1;
         var to_index = i + num_additional_args - 1;
-        if (HAS_INDEX(array, from_index, is_array)) {
+        if (from_index in array) {
           array[to_index] = array[from_index];
         } else {
           delete array[to_index];
@@ -358,7 +345,7 @@
       for (var i = start_i; i < len - del_count; i++) {
         var from_index = i + del_count;
         var to_index = i + num_additional_args;
-        if (HAS_INDEX(array, from_index, is_array)) {
+        if (from_index in array) {
           array[to_index] = array[from_index];
         } else {
           delete array[to_index];
@@ -661,7 +648,7 @@
 
   if (UseSparseVariant(array, len, IS_ARRAY(array), end_i - start_i)) {
     %NormalizeElements(array);
-    %NormalizeElements(result);
+    if (IS_ARRAY(result)) %NormalizeElements(result);
     SparseSlice(array, start_i, end_i - start_i, len, result);
   } else {
     SimpleSlice(array, start_i, end_i - start_i, len, result);
@@ -731,7 +718,7 @@
   }
   if (UseSparseVariant(array, len, IS_ARRAY(array), changed_elements)) {
     %NormalizeElements(array);
-    %NormalizeElements(deleted_elements);
+    if (IS_ARRAY(deleted_elements)) %NormalizeElements(deleted_elements);
     SparseSlice(array, start_i, del_count, len, deleted_elements);
     SparseMove(array, start_i, del_count, len, num_elements_to_add);
   } else {
@@ -1055,9 +1042,8 @@
 // or delete elements from the array.
 function InnerArrayFilter(f, receiver, array, length, result) {
   var result_length = 0;
-  var is_array = IS_ARRAY(array);
   for (var i = 0; i < length; i++) {
-    if (HAS_INDEX(array, i, is_array)) {
+    if (i in array) {
       var element = array[i];
       if (%_Call(f, receiver, element, i, array)) {
         %CreateDataProperty(result, result_length, element);
@@ -1086,17 +1072,16 @@
 function InnerArrayForEach(f, receiver, array, length) {
   if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
 
-  var is_array = IS_ARRAY(array);
   if (IS_UNDEFINED(receiver)) {
     for (var i = 0; i < length; i++) {
-      if (HAS_INDEX(array, i, is_array)) {
+      if (i in array) {
         var element = array[i];
         f(element, i, array);
       }
     }
   } else {
     for (var i = 0; i < length; i++) {
-      if (HAS_INDEX(array, i, is_array)) {
+      if (i in array) {
         var element = array[i];
         %_Call(f, receiver, element, i, array);
       }
@@ -1119,9 +1104,8 @@
 function InnerArraySome(f, receiver, array, length) {
   if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
 
-  var is_array = IS_ARRAY(array);
   for (var i = 0; i < length; i++) {
-    if (HAS_INDEX(array, i, is_array)) {
+    if (i in array) {
       var element = array[i];
       if (%_Call(f, receiver, element, i, array)) return true;
     }
@@ -1146,9 +1130,8 @@
 function InnerArrayEvery(f, receiver, array, length) {
   if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
 
-  var is_array = IS_ARRAY(array);
   for (var i = 0; i < length; i++) {
-    if (HAS_INDEX(array, i, is_array)) {
+    if (i in array) {
       var element = array[i];
       if (!%_Call(f, receiver, element, i, array)) return false;
     }
@@ -1176,9 +1159,8 @@
   var length = TO_LENGTH(array.length);
   if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
   var result = ArraySpeciesCreate(array, length);
-  var is_array = IS_ARRAY(array);
   for (var i = 0; i < length; i++) {
-    if (HAS_INDEX(array, i, is_array)) {
+    if (i in array) {
       var element = array[i];
       %CreateDataProperty(result, i, %_Call(f, receiver, element, i, array));
     }
@@ -1317,11 +1299,10 @@
     throw MakeTypeError(kCalledNonCallable, callback);
   }
 
-  var is_array = IS_ARRAY(array);
   var i = 0;
   find_initial: if (argumentsLength < 2) {
     for (; i < length; i++) {
-      if (HAS_INDEX(array, i, is_array)) {
+      if (i in array) {
         current = array[i++];
         break find_initial;
       }
@@ -1330,7 +1311,7 @@
   }
 
   for (; i < length; i++) {
-    if (HAS_INDEX(array, i, is_array)) {
+    if (i in array) {
       var element = array[i];
       current = callback(current, element, i, array);
     }
@@ -1357,11 +1338,10 @@
     throw MakeTypeError(kCalledNonCallable, callback);
   }
 
-  var is_array = IS_ARRAY(array);
   var i = length - 1;
   find_initial: if (argumentsLength < 2) {
     for (; i >= 0; i--) {
-      if (HAS_INDEX(array, i, is_array)) {
+      if (i in array) {
         current = array[i--];
         break find_initial;
       }
@@ -1370,7 +1350,7 @@
   }
 
   for (; i >= 0; i--) {
-    if (HAS_INDEX(array, i, is_array)) {
+    if (i in array) {
       var element = array[i];
       current = callback(current, element, i, array);
     }
@@ -1651,6 +1631,12 @@
   return array;
 }
 
+
+function ArraySpecies() {
+  return this;
+}
+
+
 // -------------------------------------------------------------------
 
 // Set up non-enumerable constructor property on the Array.prototype
@@ -1666,6 +1652,7 @@
   fill: true,
   find: true,
   findIndex: true,
+  includes: true,
   keys: true,
 };
 
@@ -1725,6 +1712,8 @@
   "includes", getFunction("includes", ArrayIncludes, 1),
 ]);
 
+utils.InstallGetter(GlobalArray, speciesSymbol, ArraySpecies);
+
 %FinishArrayPrototypeSetup(GlobalArray.prototype);
 
 // The internal Array prototype doesn't need to be fancy, since it's never
@@ -1784,10 +1773,6 @@
   to.InnerArraySort = InnerArraySort;
   to.InnerArrayToLocaleString = InnerArrayToLocaleString;
   to.PackedArrayReverse = PackedArrayReverse;
-  to.Stack = Stack;
-  to.StackHas = StackHas;
-  to.StackPush = StackPush;
-  to.StackPop = StackPop;
 });
 
 %InstallToContext([
diff --git a/src/js/arraybuffer.js b/src/js/arraybuffer.js
index e739960..b602dcb 100644
--- a/src/js/arraybuffer.js
+++ b/src/js/arraybuffer.js
@@ -16,6 +16,7 @@
 var MaxSimple;
 var MinSimple;
 var SpeciesConstructor;
+var speciesSymbol = utils.ImportNow("species_symbol");
 
 utils.Import(function(from) {
   MakeTypeError = from.MakeTypeError;
@@ -84,6 +85,13 @@
   return result;
 }
 
+
+function ArrayBufferSpecies() {
+  return this;
+}
+
+utils.InstallGetter(GlobalArrayBuffer, speciesSymbol, ArrayBufferSpecies);
+
 utils.InstallGetter(GlobalArrayBuffer.prototype, "byteLength",
                     ArrayBufferGetByteLen);
 
diff --git a/src/js/collection.js b/src/js/collection.js
index 0d7195d..bbb7ed2 100644
--- a/src/js/collection.js
+++ b/src/js/collection.js
@@ -19,6 +19,7 @@
 var MapIterator;
 var NumberIsNaN;
 var SetIterator;
+var speciesSymbol = utils.ImportNow("species_symbol");
 var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
 
 utils.Import(function(from) {
@@ -255,6 +256,12 @@
   }
 }
 
+
+function SetSpecies() {
+  return this;
+}
+
+
 // -------------------------------------------------------------------
 
 %SetCode(GlobalSet, SetConstructor);
@@ -266,6 +273,8 @@
 
 %FunctionSetLength(SetForEach, 1);
 
+utils.InstallGetter(GlobalSet, speciesSymbol, SetSpecies);
+
 // Set up the non-enumerable functions on the Set prototype object.
 utils.InstallGetter(GlobalSet.prototype, "size", SetGetSize);
 utils.InstallFunctions(GlobalSet.prototype, DONT_ENUM, [
@@ -435,6 +444,11 @@
   }
 }
 
+
+function MapSpecies() {
+  return this;
+}
+
 // -------------------------------------------------------------------
 
 %SetCode(GlobalMap, MapConstructor);
@@ -446,6 +460,8 @@
 
 %FunctionSetLength(MapForEach, 1);
 
+utils.InstallGetter(GlobalMap, speciesSymbol, MapSpecies);
+
 // Set up the non-enumerable functions on the Map prototype object.
 utils.InstallGetter(GlobalMap.prototype, "size", MapGetSize);
 utils.InstallFunctions(GlobalMap.prototype, DONT_ENUM, [
diff --git a/src/js/harmony-regexp-exec.js b/src/js/harmony-regexp-exec.js
deleted file mode 100644
index e2eece9..0000000
--- a/src/js/harmony-regexp-exec.js
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalRegExp = global.RegExp;
-var RegExpSubclassExecJS = utils.ImportNow("RegExpSubclassExecJS");
-var RegExpSubclassMatch = utils.ImportNow("RegExpSubclassMatch");
-var RegExpSubclassReplace = utils.ImportNow("RegExpSubclassReplace");
-var RegExpSubclassSearch = utils.ImportNow("RegExpSubclassSearch");
-var RegExpSubclassSplit = utils.ImportNow("RegExpSubclassSplit");
-var RegExpSubclassTest = utils.ImportNow("RegExpSubclassTest");
-var matchSymbol = utils.ImportNow("match_symbol");
-var replaceSymbol = utils.ImportNow("replace_symbol");
-var searchSymbol = utils.ImportNow("search_symbol");
-var splitSymbol = utils.ImportNow("split_symbol");
-
-utils.OverrideFunction(GlobalRegExp.prototype, "exec",
-                       RegExpSubclassExecJS, true);
-utils.OverrideFunction(GlobalRegExp.prototype, matchSymbol,
-                       RegExpSubclassMatch, true);
-utils.OverrideFunction(GlobalRegExp.prototype, replaceSymbol,
-                       RegExpSubclassReplace, true);
-utils.OverrideFunction(GlobalRegExp.prototype, searchSymbol,
-                       RegExpSubclassSearch, true);
-utils.OverrideFunction(GlobalRegExp.prototype, splitSymbol,
-                       RegExpSubclassSplit, true);
-utils.OverrideFunction(GlobalRegExp.prototype, "test",
-                       RegExpSubclassTest, true);
-
-})
diff --git a/src/js/harmony-species.js b/src/js/harmony-species.js
deleted file mode 100644
index 426ac46..0000000
--- a/src/js/harmony-species.js
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils, extrasUtils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-var GlobalArray = global.Array;
-// It is important that this file is run after src/js/typedarray.js,
-// otherwise GlobalTypedArray would be Object, and we would break
-// old versions of Zepto.
-var GlobalTypedArray = global.Uint8Array.__proto__;
-var GlobalMap = global.Map;
-var GlobalSet = global.Set;
-var GlobalArrayBuffer = global.ArrayBuffer;
-var GlobalPromise = global.Promise;
-var GlobalRegExp = global.RegExp;
-var speciesSymbol = utils.ImportNow("species_symbol");
-
-function ArraySpecies() {
-  return this;
-}
-
-function TypedArraySpecies() {
-  return this;
-}
-
-function MapSpecies() {
-  return this;
-}
-
-function SetSpecies() {
-  return this;
-}
-
-function ArrayBufferSpecies() {
-  return this;
-}
-
-function PromiseSpecies() {
-  return this;
-}
-
-function RegExpSpecies() {
-  return this;
-}
-
-utils.InstallGetter(GlobalArray, speciesSymbol, ArraySpecies, DONT_ENUM);
-utils.InstallGetter(GlobalTypedArray, speciesSymbol, TypedArraySpecies, DONT_ENUM);
-utils.InstallGetter(GlobalMap, speciesSymbol, MapSpecies, DONT_ENUM);
-utils.InstallGetter(GlobalSet, speciesSymbol, SetSpecies, DONT_ENUM);
-utils.InstallGetter(GlobalArrayBuffer, speciesSymbol, ArrayBufferSpecies,
-                    DONT_ENUM);
-utils.InstallGetter(GlobalPromise, speciesSymbol, PromiseSpecies, DONT_ENUM);
-utils.InstallGetter(GlobalRegExp, speciesSymbol, RegExpSpecies, DONT_ENUM);
-
-});
diff --git a/src/js/harmony-unicode-regexps.js b/src/js/harmony-unicode-regexps.js
deleted file mode 100644
index 16d06ba..0000000
--- a/src/js/harmony-unicode-regexps.js
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-'use strict';
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalRegExp = global.RegExp;
-var GlobalRegExpPrototype = GlobalRegExp.prototype;
-var MakeTypeError;
-
-utils.Import(function(from) {
-  MakeTypeError = from.MakeTypeError;
-});
-
-// -------------------------------------------------------------------
-
-// ES6 21.2.5.15.
-function RegExpGetUnicode() {
-  if (!IS_REGEXP(this)) {
-    // TODO(littledan): Remove this RegExp compat workaround
-    if (this === GlobalRegExpPrototype) {
-      %IncrementUseCounter(kRegExpPrototypeUnicodeGetter);
-      return UNDEFINED;
-    }
-    throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.unicode");
-  }
-  return TO_BOOLEAN(REGEXP_UNICODE(this));
-}
-%SetForceInlineFlag(RegExpGetUnicode);
-
-utils.InstallGetter(GlobalRegExp.prototype, 'unicode', RegExpGetUnicode);
-
-})
diff --git a/src/js/i18n.js b/src/js/i18n.js
index 7c9535b..6c769a7 100644
--- a/src/js/i18n.js
+++ b/src/js/i18n.js
@@ -21,7 +21,6 @@
 var ArrayJoin;
 var ArrayPush;
 var FLAG_intl_extra;
-var GlobalBoolean = global.Boolean;
 var GlobalDate = global.Date;
 var GlobalNumber = global.Number;
 var GlobalRegExp = global.RegExp;
@@ -31,7 +30,6 @@
 var InternalArray = utils.InternalArray;
 var InternalRegExpMatch;
 var InternalRegExpReplace
-var IsFinite;
 var IsNaN;
 var MakeError;
 var MakeRangeError;
@@ -51,7 +49,6 @@
   ArrayIndexOf = from.ArrayIndexOf;
   ArrayJoin = from.ArrayJoin;
   ArrayPush = from.ArrayPush;
-  IsFinite = from.IsFinite;
   IsNaN = from.IsNaN;
   MakeError = from.MakeError;
   MakeRangeError = from.MakeRangeError;
@@ -285,7 +282,7 @@
 
   var matcher = options.localeMatcher;
   if (!IS_UNDEFINED(matcher)) {
-    matcher = GlobalString(matcher);
+    matcher = TO_STRING(matcher);
     if (matcher !== 'lookup' && matcher !== 'best fit') {
       throw MakeRangeError(kLocaleMatcher, matcher);
     }
@@ -366,13 +363,13 @@
       var value = options[property];
       switch (type) {
         case 'boolean':
-          value = GlobalBoolean(value);
+          value = TO_BOOLEAN(value);
           break;
         case 'string':
-          value = GlobalString(value);
+          value = TO_STRING(value);
           break;
         case 'number':
-          value = GlobalNumber(value);
+          value = TO_NUMBER(value);
           break;
         default:
           throw MakeError(kWrongValueType);
@@ -523,7 +520,7 @@
   var extension = '';
 
   var updateExtension = function updateExtension(key, value) {
-    return '-' + key + '-' + GlobalString(value);
+    return '-' + key + '-' + TO_STRING(value);
   }
 
   var updateProperty = function updateProperty(property, type, value) {
@@ -742,7 +739,7 @@
     return localeID;
   }
 
-  var localeString = GlobalString(localeID);
+  var localeString = TO_STRING(localeID);
 
   if (isValidLanguageTag(localeString) === false) {
     throw MakeRangeError(kInvalidLanguageTag, localeString);
@@ -1078,7 +1075,7 @@
  */
 function compare(collator, x, y) {
   return %InternalCompare(%GetImplFromInitializedIntlObject(collator),
-                          GlobalString(x), GlobalString(y));
+                          TO_STRING(x), TO_STRING(y));
 };
 
 
@@ -1102,8 +1099,8 @@
 function getNumberOption(options, property, min, max, fallback) {
   var value = options[property];
   if (!IS_UNDEFINED(value)) {
-    value = GlobalNumber(value);
-    if (IsNaN(value) || value < min || value > max) {
+    value = TO_NUMBER(value);
+    if (NUMBER_IS_NAN(value) || value < min || value > max) {
       throw MakeRangeError(kPropertyValueOutOfRange, property);
     }
     return %math_floor(value);
@@ -1348,7 +1345,7 @@
  */
 function IntlParseNumber(formatter, value) {
   return %InternalNumberParse(%GetImplFromInitializedIntlObject(formatter),
-                              GlobalString(value));
+                              TO_STRING(value));
 }
 
 AddBoundMethod(Intl.NumberFormat, 'format', formatNumber, 1, 'numberformat');
@@ -1755,7 +1752,7 @@
     dateMs = TO_NUMBER(dateValue);
   }
 
-  if (!IsFinite(dateMs)) throw MakeRangeError(kDateRange);
+  if (!NUMBER_IS_FINITE(dateMs)) throw MakeRangeError(kDateRange);
 
   return %InternalDateFormat(%GetImplFromInitializedIntlObject(formatter),
                              new GlobalDate(dateMs));
@@ -1770,7 +1767,7 @@
  */
 function IntlParseDate(formatter, value) {
   return %InternalDateParse(%GetImplFromInitializedIntlObject(formatter),
-                            GlobalString(value));
+                            TO_STRING(value));
 }
 
 
@@ -1927,7 +1924,7 @@
  */
 function adoptText(iterator, text) {
   %BreakIteratorAdoptText(%GetImplFromInitializedIntlObject(iterator),
-                          GlobalString(text));
+                          TO_STRING(text));
 }
 
 
@@ -1991,6 +1988,23 @@
   'dateformattime': UNDEFINED,
 };
 
+function clearDefaultObjects() {
+  defaultObjects['dateformatall'] = UNDEFINED;
+  defaultObjects['dateformatdate'] = UNDEFINED;
+  defaultObjects['dateformattime'] = UNDEFINED;
+}
+
+var date_cache_version = 0;
+
+function checkDateCacheCurrent() {
+  var new_date_cache_version = %DateCacheVersion();
+  if (new_date_cache_version == date_cache_version) {
+    return;
+  }
+  date_cache_version = new_date_cache_version;
+
+  clearDefaultObjects();
+}
 
 /**
  * Returns cached or newly created instance of a given service.
@@ -1999,6 +2013,7 @@
 function cachedOrNewService(service, locales, options, defaults) {
   var useOptions = (IS_UNDEFINED(defaults)) ? options : defaults;
   if (IS_UNDEFINED(locales) && IS_UNDEFINED(options)) {
+    checkDateCacheCurrent();
     if (IS_UNDEFINED(defaultObjects[service])) {
       defaultObjects[service] = new savedObjects[service](locales, useOptions);
     }
diff --git a/src/js/json.js b/src/js/json.js
deleted file mode 100644
index c6dbed9..0000000
--- a/src/js/json.js
+++ /dev/null
@@ -1,297 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalDate = global.Date;
-var GlobalJSON = global.JSON;
-var GlobalSet = global.Set;
-var InternalArray = utils.InternalArray;
-var MakeTypeError;
-var MaxSimple;
-var MinSimple;
-var ObjectHasOwnProperty;
-var Stack;
-var StackHas;
-var StackPop;
-var StackPush;
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-
-utils.Import(function(from) {
-  MakeTypeError = from.MakeTypeError;
-  MaxSimple = from.MaxSimple;
-  MinSimple = from.MinSimple;
-  ObjectHasOwnProperty = from.ObjectHasOwnProperty;
-  Stack = from.Stack;
-  StackHas = from.StackHas;
-  StackPop = from.StackPop;
-  StackPush = from.StackPush;
-});
-
-// -------------------------------------------------------------------
-
-function CreateDataProperty(o, p, v) {
-  var desc = {value: v, enumerable: true, writable: true, configurable: true};
-  return %reflect_define_property(o, p, desc);
-}
-
-
-function InternalizeJSONProperty(holder, name, reviver) {
-  var val = holder[name];
-  if (IS_RECEIVER(val)) {
-    if (%is_arraylike(val)) {
-      var length = TO_LENGTH(val.length);
-      for (var i = 0; i < length; i++) {
-        var newElement =
-            InternalizeJSONProperty(val, %_NumberToString(i), reviver);
-        if (IS_UNDEFINED(newElement)) {
-          %reflect_delete_property(val, i);
-        } else {
-          CreateDataProperty(val, i, newElement);
-        }
-      }
-    } else {
-      var keys = %object_keys(val);
-      for (var i = 0; i < keys.length; i++) {
-        var p = keys[i];
-        var newElement = InternalizeJSONProperty(val, p, reviver);
-        if (IS_UNDEFINED(newElement)) {
-          %reflect_delete_property(val, p);
-        } else {
-          CreateDataProperty(val, p, newElement);
-        }
-      }
-    }
-  }
-  return %_Call(reviver, holder, name, val);
-}
-
-
-function JSONParse(text, reviver) {
-  var unfiltered = %ParseJson(text);
-  if (IS_CALLABLE(reviver)) {
-    return InternalizeJSONProperty({'': unfiltered}, '', reviver);
-  } else {
-    return unfiltered;
-  }
-}
-
-
-function SerializeArray(value, replacer, stack, indent, gap) {
-  if (StackHas(stack, value)) throw MakeTypeError(kCircularStructure);
-  StackPush(stack, value);
-  var stepback = indent;
-  indent += gap;
-  var partial = new InternalArray();
-  var len = TO_LENGTH(value.length);
-  for (var i = 0; i < len; i++) {
-    var strP = JSONSerialize(%_NumberToString(i), value, replacer, stack,
-                             indent, gap);
-    if (IS_UNDEFINED(strP)) {
-      strP = "null";
-    }
-    partial.push(strP);
-  }
-  var final;
-  if (gap == "") {
-    final = "[" + partial.join(",") + "]";
-  } else if (partial.length > 0) {
-    var separator = ",\n" + indent;
-    final = "[\n" + indent + partial.join(separator) + "\n" +
-        stepback + "]";
-  } else {
-    final = "[]";
-  }
-  StackPop(stack);
-  return final;
-}
-
-
-function SerializeObject(value, replacer, stack, indent, gap) {
-  if (StackHas(stack, value)) throw MakeTypeError(kCircularStructure);
-  StackPush(stack, value);
-  var stepback = indent;
-  indent += gap;
-  var partial = new InternalArray();
-  if (IS_ARRAY(replacer)) {
-    var length = replacer.length;
-    for (var i = 0; i < length; i++) {
-      var p = replacer[i];
-      var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
-      if (!IS_UNDEFINED(strP)) {
-        var member = %QuoteJSONString(p) + ":";
-        if (gap != "") member += " ";
-        member += strP;
-        partial.push(member);
-      }
-    }
-  } else {
-    var keys = %object_keys(value);
-    for (var i = 0; i < keys.length; i++) {
-      var p = keys[i];
-      var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
-      if (!IS_UNDEFINED(strP)) {
-        var member = %QuoteJSONString(p) + ":";
-        if (gap != "") member += " ";
-        member += strP;
-        partial.push(member);
-      }
-    }
-  }
-  var final;
-  if (gap == "") {
-    final = "{" + partial.join(",") + "}";
-  } else if (partial.length > 0) {
-    var separator = ",\n" + indent;
-    final = "{\n" + indent + partial.join(separator) + "\n" +
-        stepback + "}";
-  } else {
-    final = "{}";
-  }
-  StackPop(stack);
-  return final;
-}
-
-
-function JSONSerialize(key, holder, replacer, stack, indent, gap) {
-  var value = holder[key];
-  if (IS_RECEIVER(value)) {
-    var toJSON = value.toJSON;
-    if (IS_CALLABLE(toJSON)) {
-      value = %_Call(toJSON, value, key);
-    }
-  }
-  if (IS_CALLABLE(replacer)) {
-    value = %_Call(replacer, holder, key, value);
-  }
-  if (IS_STRING(value)) {
-    return %QuoteJSONString(value);
-  } else if (IS_NUMBER(value)) {
-    return JSON_NUMBER_TO_STRING(value);
-  } else if (IS_BOOLEAN(value)) {
-    return value ? "true" : "false";
-  } else if (IS_NULL(value)) {
-    return "null";
-  } else if (IS_RECEIVER(value) && !IS_CALLABLE(value)) {
-    // Non-callable object. If it's a primitive wrapper, it must be unwrapped.
-    if (%is_arraylike(value)) {
-      return SerializeArray(value, replacer, stack, indent, gap);
-    } else if (IS_NUMBER_WRAPPER(value)) {
-      value = TO_NUMBER(value);
-      return JSON_NUMBER_TO_STRING(value);
-    } else if (IS_STRING_WRAPPER(value)) {
-      return %QuoteJSONString(TO_STRING(value));
-    } else if (IS_BOOLEAN_WRAPPER(value)) {
-      return %_ValueOf(value) ? "true" : "false";
-    } else {
-      return SerializeObject(value, replacer, stack, indent, gap);
-    }
-  }
-  // Undefined or a callable object.
-  return UNDEFINED;
-}
-
-
-function JSONStringify(value, replacer, space) {
-  if (arguments.length === 1 && !IS_PROXY(value)) {
-    return %BasicJSONStringify(value);
-  }
-  if (!IS_CALLABLE(replacer) && %is_arraylike(replacer)) {
-    var property_list = new InternalArray();
-    var seen_properties = new GlobalSet();
-    var length = TO_LENGTH(replacer.length);
-    for (var i = 0; i < length; i++) {
-      var v = replacer[i];
-      var item;
-      if (IS_STRING(v)) {
-        item = v;
-      } else if (IS_NUMBER(v)) {
-        item = %_NumberToString(v);
-      } else if (IS_STRING_WRAPPER(v) || IS_NUMBER_WRAPPER(v)) {
-        item = TO_STRING(v);
-      } else {
-        continue;
-      }
-      if (!seen_properties.has(item)) {
-        property_list.push(item);
-        seen_properties.add(item);
-      }
-    }
-    replacer = property_list;
-  }
-  if (IS_OBJECT(space)) {
-    // Unwrap 'space' if it is wrapped
-    if (IS_NUMBER_WRAPPER(space)) {
-      space = TO_NUMBER(space);
-    } else if (IS_STRING_WRAPPER(space)) {
-      space = TO_STRING(space);
-    }
-  }
-  var gap;
-  if (IS_NUMBER(space)) {
-    space = MaxSimple(0, MinSimple(TO_INTEGER(space), 10));
-    gap = %_SubString("          ", 0, space);
-  } else if (IS_STRING(space)) {
-    if (space.length > 10) {
-      gap = %_SubString(space, 0, 10);
-    } else {
-      gap = space;
-    }
-  } else {
-    gap = "";
-  }
-  if (!IS_CALLABLE(replacer) && !property_list && !gap && !IS_PROXY(value)) {
-    return %BasicJSONStringify(value);
-  }
-  return JSONSerialize('', {'': value}, replacer, new Stack(), "", gap);
-}
-
-// -------------------------------------------------------------------
-
-%AddNamedProperty(GlobalJSON, toStringTagSymbol, "JSON", READ_ONLY | DONT_ENUM);
-
-// Set up non-enumerable properties of the JSON object.
-utils.InstallFunctions(GlobalJSON, DONT_ENUM, [
-  "parse", JSONParse,
-  "stringify", JSONStringify
-]);
-
-// -------------------------------------------------------------------
-// Date.toJSON
-
-// 20.3.4.37 Date.prototype.toJSON ( key )
-function DateToJSON(key) {
-  var o = TO_OBJECT(this);
-  var tv = TO_PRIMITIVE_NUMBER(o);
-  if (IS_NUMBER(tv) && !NUMBER_IS_FINITE(tv)) {
-    return null;
-  }
-  return o.toISOString();
-}
-
-// Set up non-enumerable functions of the Date prototype object.
-utils.InstallFunctions(GlobalDate.prototype, DONT_ENUM, [
-  "toJSON", DateToJSON
-]);
-
-// -------------------------------------------------------------------
-// JSON Builtins
-
-function JsonSerializeAdapter(key, object) {
-  var holder = {};
-  holder[key] = object;
-  // No need to pass the actual holder since there is no replacer function.
-  return JSONSerialize(key, holder, UNDEFINED, new Stack(), "", "");
-}
-
-%InstallToContext(["json_serialize_adapter", JsonSerializeAdapter]);
-
-})
diff --git a/src/js/macros.py b/src/js/macros.py
index 3cc2d6c..c6ed019 100644
--- a/src/js/macros.py
+++ b/src/js/macros.py
@@ -110,7 +110,6 @@
 macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
 macro TO_BOOLEAN(arg) = (!!(arg));
 macro TO_INTEGER(arg) = (%_ToInteger(arg));
-macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(arg));
 macro TO_INT32(arg) = ((arg) | 0);
 macro TO_UINT32(arg) = ((arg) >>> 0);
 macro INVERT_NEG_ZERO(arg) = ((arg) + 0);
@@ -124,7 +123,6 @@
 macro TO_NAME(arg) = (%_ToName(arg));
 macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
 macro HAS_OWN_PROPERTY(obj, key) = (%_Call(ObjectHasOwnProperty, obj, key));
-macro HAS_INDEX(array, index, is_array) = ((is_array && %_HasFastPackedElements(%IS_VAR(array)) && (index < array.length)) ||  (index in array));
 
 # Private names.
 macro IS_PRIVATE(sym) = (%SymbolIsPrivate(sym));
diff --git a/src/js/math.js b/src/js/math.js
index f8ad6b1..74d3aa6 100644
--- a/src/js/math.js
+++ b/src/js/math.js
@@ -16,7 +16,6 @@
 var GlobalFloat64Array = global.Float64Array;
 var GlobalMath = global.Math;
 var GlobalObject = global.Object;
-var InternalArray = utils.InternalArray;
 var NaN = %GetRootNaN();
 var nextRandomIndex = 0;
 var randomNumbers = UNDEFINED;
@@ -30,25 +29,6 @@
   return (x > 0) ? x : 0 - x;
 }
 
-// ECMA 262 - 15.8.2.5
-// The naming of y and x matches the spec, as does the order in which
-// ToNumber (valueOf) is called.
-function MathAtan2JS(y, x) {
-  y = +y;
-  x = +x;
-  return %MathAtan2(y, x);
-}
-
-// ECMA 262 - 15.8.2.8
-function MathExp(x) {
-  return %MathExpRT(TO_NUMBER(x));
-}
-
-// ECMA 262 - 15.8.2.10
-function MathLog(x) {
-  return %_MathLogRT(TO_NUMBER(x));
-}
-
 // ECMA 262 - 15.8.2.13
 function MathPowJS(x, y) {
   return %_MathPow(TO_NUMBER(x), TO_NUMBER(y));
@@ -63,7 +43,11 @@
   // first two elements are reserved for the PRNG state.
   if (nextRandomIndex <= kRandomNumberStart) {
     randomNumbers = %GenerateRandomNumbers(randomNumbers);
-    nextRandomIndex = randomNumbers.length;
+    if (%_IsTypedArray(randomNumbers)) {
+      nextRandomIndex = %_TypedArrayGetLength(randomNumbers);
+    } else {
+      nextRandomIndex = randomNumbers.length;
+    }
   }
   return randomNumbers[--nextRandomIndex];
 }
@@ -71,7 +55,7 @@
 function MathRandomRaw() {
   if (nextRandomIndex <= kRandomNumberStart) {
     randomNumbers = %GenerateRandomNumbers(randomNumbers);
-    nextRandomIndex = randomNumbers.length;
+    nextRandomIndex = %_TypedArrayGetLength(randomNumbers);
   }
   return %_DoubleLo(randomNumbers[--nextRandomIndex]) & 0x3FFFFFFF;
 }
@@ -90,9 +74,9 @@
   x = TO_NUMBER(x);
   // Idempotent for NaN, +/-0 and +/-Infinity.
   if (x === 0 || !NUMBER_IS_FINITE(x)) return x;
-  if (x > 0) return MathLog(x + %math_sqrt(x * x + 1));
+  if (x > 0) return %math_log(x + %math_sqrt(x * x + 1));
   // This is to prevent numerical errors caused by large negative x.
-  return -MathLog(-x + %math_sqrt(x * x + 1));
+  return -%math_log(-x + %math_sqrt(x * x + 1));
 }
 
 // ES6 draft 09-27-13, section 20.2.2.3.
@@ -101,17 +85,7 @@
   if (x < 1) return NaN;
   // Idempotent for NaN and +Infinity.
   if (!NUMBER_IS_FINITE(x)) return x;
-  return MathLog(x + %math_sqrt(x + 1) * %math_sqrt(x - 1));
-}
-
-// ES6 draft 09-27-13, section 20.2.2.7.
-function MathAtanh(x) {
-  x = TO_NUMBER(x);
-  // Idempotent for +/-0.
-  if (x === 0) return x;
-  // Returns NaN for NaN and +/- Infinity.
-  if (!NUMBER_IS_FINITE(x)) return NaN;
-  return 0.5 * MathLog((1 + x) / (1 - x));
+  return %math_log(x + %math_sqrt(x + 1) * %math_sqrt(x - 1));
 }
 
 // ES6 draft 09-27-13, section 20.2.2.17.
@@ -143,29 +117,6 @@
   return %math_sqrt(sum) * max;
 }
 
-// ES6 draft 09-27-13, section 20.2.2.9.
-// Cube root approximation, refer to: http://metamerist.com/cbrt/cbrt.htm
-// Using initial approximation adapted from Kahan's cbrt and 4 iterations
-// of Newton's method.
-function MathCbrt(x) {
-  x = TO_NUMBER(x);
-  if (x == 0 || !NUMBER_IS_FINITE(x)) return x;
-  return x >= 0 ? CubeRoot(x) : -CubeRoot(-x);
-}
-
-macro NEWTON_ITERATION_CBRT(x, approx)
-  (1.0 / 3.0) * (x / (approx * approx) + 2 * approx);
-endmacro
-
-function CubeRoot(x) {
-  var approx_hi = %math_floor(%_DoubleHi(x) / 3) + 0x2A9F7893;
-  var approx = %_ConstructDouble(approx_hi | 0, 0);
-  approx = NEWTON_ITERATION_CBRT(x, approx);
-  approx = NEWTON_ITERATION_CBRT(x, approx);
-  approx = NEWTON_ITERATION_CBRT(x, approx);
-  return NEWTON_ITERATION_CBRT(x, approx);
-}
-
 // -------------------------------------------------------------------
 
 %InstallToContext([
@@ -176,15 +127,6 @@
 
 // Set up math constants.
 utils.InstallConstants(GlobalMath, [
-  // ECMA-262, section 15.8.1.1.
-  "E", 2.7182818284590452354,
-  // ECMA-262, section 15.8.1.2.
-  "LN10", 2.302585092994046,
-  // ECMA-262, section 15.8.1.3.
-  "LN2", 0.6931471805599453,
-  // ECMA-262, section 15.8.1.4.
-  "LOG2E", 1.4426950408889634,
-  "LOG10E", 0.4342944819032518,
   "PI", 3.1415926535897932,
   "SQRT1_2", 0.7071067811865476,
   "SQRT2", 1.4142135623730951
@@ -195,20 +137,13 @@
 utils.InstallFunctions(GlobalMath, DONT_ENUM, [
   "random", MathRandom,
   "abs", MathAbs,
-  "exp", MathExp,
-  "log", MathLog,
-  "atan2", MathAtan2JS,
   "pow", MathPowJS,
   "sign", MathSign,
   "asinh", MathAsinh,
   "acosh", MathAcosh,
-  "atanh", MathAtanh,
   "hypot", MathHypot,
-  "cbrt", MathCbrt
 ]);
 
-%SetForceInlineFlag(MathAbs);
-%SetForceInlineFlag(MathAtan2JS);
 %SetForceInlineFlag(MathRandom);
 %SetForceInlineFlag(MathSign);
 
@@ -217,7 +152,6 @@
 
 utils.Export(function(to) {
   to.MathAbs = MathAbs;
-  to.MathExp = MathExp;
   to.IntRandom = MathRandomRaw;
 });
 
diff --git a/src/js/messages.js b/src/js/messages.js
index b5c4b56..24d8d2b 100644
--- a/src/js/messages.js
+++ b/src/js/messages.js
@@ -41,7 +41,6 @@
 var Script = utils.ImportNow("Script");
 var stackTraceSymbol = utils.ImportNow("stack_trace_symbol");
 var StringIndexOf;
-var StringSubstring;
 var SymbolToString;
 var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
 var Uint16x8ToString;
@@ -59,7 +58,6 @@
   Int8x16ToString = from.Int8x16ToString;
   ObjectHasOwnProperty = from.ObjectHasOwnProperty;
   StringIndexOf = from.StringIndexOf;
-  StringSubstring = from.StringSubstring;
   SymbolToString = from.SymbolToString;
   Uint16x8ToString = from.Uint16x8ToString;
   Uint32x4ToString = from.Uint32x4ToString;
@@ -214,210 +212,26 @@
   var start_position = %MessageGetStartPosition(message);
   var location = script.locationFromPosition(start_position, true);
   if (location == null) return "";
-  return location.sourceText();
-}
-
-
-/**
- * Find a line number given a specific source position.
- * @param {number} position The source position.
- * @return {number} 0 if input too small, -1 if input too large,
-       else the line number.
- */
-function ScriptLineFromPosition(position) {
-  var lower = 0;
-  var upper = this.lineCount() - 1;
-  var line_ends = this.line_ends;
-
-  // We'll never find invalid positions so bail right away.
-  if (position > line_ends[upper]) {
-    return -1;
-  }
-
-  // This means we don't have to safe-guard indexing line_ends[i - 1].
-  if (position <= line_ends[0]) {
-    return 0;
-  }
-
-  // Binary search to find line # from position range.
-  while (upper >= 1) {
-    var i = (lower + upper) >> 1;
-
-    if (position > line_ends[i]) {
-      lower = i + 1;
-    } else if (position <= line_ends[i - 1]) {
-      upper = i - 1;
-    } else {
-      return i;
-    }
-  }
-
-  return -1;
+  return location.sourceText;
 }
 
 
 /**
  * Get information on a specific source position.
+ * Returns an object with the following following properties:
+ *   script     : script object for the source
+ *   line       : source line number
+ *   column     : source column within the line
+ *   position   : position within the source
+ *   sourceText : a string containing the current line
  * @param {number} position The source position
  * @param {boolean} include_resource_offset Set to true to have the resource
  *     offset added to the location
- * @return {SourceLocation}
- *     If line is negative or not in the source null is returned.
+ * @return If line is negative or not in the source null is returned.
  */
 function ScriptLocationFromPosition(position,
                                     include_resource_offset) {
-  var line = this.lineFromPosition(position);
-  if (line == -1) return null;
-
-  // Determine start, end and column.
-  var line_ends = this.line_ends;
-  var start = line == 0 ? 0 : line_ends[line - 1] + 1;
-  var end = line_ends[line];
-  if (end > 0 && %_StringCharAt(this.source, end - 1) === '\r') {
-    end--;
-  }
-  var column = position - start;
-
-  // Adjust according to the offset within the resource.
-  if (include_resource_offset) {
-    line += this.line_offset;
-    if (line == this.line_offset) {
-      column += this.column_offset;
-    }
-  }
-
-  return new SourceLocation(this, position, line, column, start, end);
-}
-
-
-/**
- * Get information on a specific source line and column possibly offset by a
- * fixed source position. This function is used to find a source position from
- * a line and column position. The fixed source position offset is typically
- * used to find a source position in a function based on a line and column in
- * the source for the function alone. The offset passed will then be the
- * start position of the source for the function within the full script source.
- * @param {number} opt_line The line within the source. Default value is 0
- * @param {number} opt_column The column in within the line. Default value is 0
- * @param {number} opt_offset_position The offset from the begining of the
- *     source from where the line and column calculation starts.
- *     Default value is 0
- * @return {SourceLocation}
- *     If line is negative or not in the source null is returned.
- */
-function ScriptLocationFromLine(opt_line, opt_column, opt_offset_position) {
-  // Default is the first line in the script. Lines in the script is relative
-  // to the offset within the resource.
-  var line = 0;
-  if (!IS_UNDEFINED(opt_line)) {
-    line = opt_line - this.line_offset;
-  }
-
-  // Default is first column. If on the first line add the offset within the
-  // resource.
-  var column = opt_column || 0;
-  if (line == 0) {
-    column -= this.column_offset;
-  }
-
-  var offset_position = opt_offset_position || 0;
-  if (line < 0 || column < 0 || offset_position < 0) return null;
-  if (line == 0) {
-    return this.locationFromPosition(offset_position + column, false);
-  } else {
-    // Find the line where the offset position is located.
-    var offset_line = this.lineFromPosition(offset_position);
-
-    if (offset_line == -1 || offset_line + line >= this.lineCount()) {
-      return null;
-    }
-
-    return this.locationFromPosition(
-        this.line_ends[offset_line + line - 1] + 1 + column);  // line > 0 here.
-  }
-}
-
-
-/**
- * Get a slice of source code from the script. The boundaries for the slice is
- * specified in lines.
- * @param {number} opt_from_line The first line (zero bound) in the slice.
- *     Default is 0
- * @param {number} opt_to_column The last line (zero bound) in the slice (non
- *     inclusive). Default is the number of lines in the script
- * @return {SourceSlice} The source slice or null of the parameters where
- *     invalid
- */
-function ScriptSourceSlice(opt_from_line, opt_to_line) {
-  var from_line = IS_UNDEFINED(opt_from_line) ? this.line_offset
-                                              : opt_from_line;
-  var to_line = IS_UNDEFINED(opt_to_line) ? this.line_offset + this.lineCount()
-                                          : opt_to_line;
-
-  // Adjust according to the offset within the resource.
-  from_line -= this.line_offset;
-  to_line -= this.line_offset;
-  if (from_line < 0) from_line = 0;
-  if (to_line > this.lineCount()) to_line = this.lineCount();
-
-  // Check parameters.
-  if (from_line >= this.lineCount() ||
-      to_line < 0 ||
-      from_line > to_line) {
-    return null;
-  }
-
-  var line_ends = this.line_ends;
-  var from_position = from_line == 0 ? 0 : line_ends[from_line - 1] + 1;
-  var to_position = to_line == 0 ? 0 : line_ends[to_line - 1] + 1;
-
-  // Return a source slice with line numbers re-adjusted to the resource.
-  return new SourceSlice(this,
-                         from_line + this.line_offset,
-                         to_line + this.line_offset,
-                          from_position, to_position);
-}
-
-
-function ScriptSourceLine(opt_line) {
-  // Default is the first line in the script. Lines in the script are relative
-  // to the offset within the resource.
-  var line = 0;
-  if (!IS_UNDEFINED(opt_line)) {
-    line = opt_line - this.line_offset;
-  }
-
-  // Check parameter.
-  if (line < 0 || this.lineCount() <= line) {
-    return null;
-  }
-
-  // Return the source line.
-  var line_ends = this.line_ends;
-  var start = line == 0 ? 0 : line_ends[line - 1] + 1;
-  var end = line_ends[line];
-  return %_Call(StringSubstring, this.source, start, end);
-}
-
-
-/**
- * Returns the number of source lines.
- * @return {number}
- *     Number of source lines.
- */
-function ScriptLineCount() {
-  // Return number of source lines.
-  return this.line_ends.length;
-}
-
-
-/**
- * Returns the position of the nth line end.
- * @return {number}
- *     Zero-based position of the nth line end in the script.
- */
-function ScriptLineEnd(n) {
-  return this.line_ends[n];
+  return %ScriptPositionInfo(this, position, !!include_resource_offset);
 }
 
 
@@ -442,113 +256,15 @@
     "name",
     "source_url",
     "source_mapping_url",
-    "line_ends",
     "line_offset",
     "column_offset"
   ], [
-    "lineFromPosition", ScriptLineFromPosition,
     "locationFromPosition", ScriptLocationFromPosition,
-    "locationFromLine", ScriptLocationFromLine,
-    "sourceSlice", ScriptSourceSlice,
-    "sourceLine", ScriptSourceLine,
-    "lineCount", ScriptLineCount,
     "nameOrSourceURL", ScriptNameOrSourceURL,
-    "lineEnd", ScriptLineEnd
   ]
 );
 
 
-/**
- * Class for source location. A source location is a position within some
- * source with the following properties:
- *   script   : script object for the source
- *   line     : source line number
- *   column   : source column within the line
- *   position : position within the source
- *   start    : position of start of source context (inclusive)
- *   end      : position of end of source context (not inclusive)
- * Source text for the source context is the character interval
- * [start, end[. In most cases end will point to a newline character.
- * It might point just past the final position of the source if the last
- * source line does not end with a newline character.
- * @param {Script} script The Script object for which this is a location
- * @param {number} position Source position for the location
- * @param {number} line The line number for the location
- * @param {number} column The column within the line for the location
- * @param {number} start Source position for start of source context
- * @param {number} end Source position for end of source context
- * @constructor
- */
-function SourceLocation(script, position, line, column, start, end) {
-  this.script = script;
-  this.position = position;
-  this.line = line;
-  this.column = column;
-  this.start = start;
-  this.end = end;
-}
-
-
-/**
- * Get the source text for a SourceLocation
- * @return {String}
- *     Source text for this location.
- */
-function SourceLocationSourceText() {
-  return %_Call(StringSubstring, this.script.source, this.start, this.end);
-}
-
-
-utils.SetUpLockedPrototype(SourceLocation,
-  ["script", "position", "line", "column", "start", "end"],
-  ["sourceText", SourceLocationSourceText]
-);
-
-
-/**
- * Class for a source slice. A source slice is a part of a script source with
- * the following properties:
- *   script        : script object for the source
- *   from_line     : line number for the first line in the slice
- *   to_line       : source line number for the last line in the slice
- *   from_position : position of the first character in the slice
- *   to_position   : position of the last character in the slice
- * The to_line and to_position are not included in the slice, that is the lines
- * in the slice are [from_line, to_line[. Likewise the characters in the slice
- * are [from_position, to_position[.
- * @param {Script} script The Script object for the source slice
- * @param {number} from_line
- * @param {number} to_line
- * @param {number} from_position
- * @param {number} to_position
- * @constructor
- */
-function SourceSlice(script, from_line, to_line, from_position, to_position) {
-  this.script = script;
-  this.from_line = from_line;
-  this.to_line = to_line;
-  this.from_position = from_position;
-  this.to_position = to_position;
-}
-
-/**
- * Get the source text for a SourceSlice
- * @return {String} Source text for this slice. The last line will include
- *     the line terminating characters (if any)
- */
-function SourceSliceSourceText() {
-  return %_Call(StringSubstring,
-                this.script.source,
-                this.from_position,
-                this.to_position);
-}
-
-utils.SetUpLockedPrototype(SourceSlice,
-  ["script", "from_line", "to_line", "from_position", "to_position"],
-  ["sourceText", SourceSliceSourceText]
-);
-
-
 function GetStackTraceLine(recv, fun, pos, isGlobal) {
   return new CallSite(recv, fun, pos, false).toString();
 }
@@ -559,8 +275,8 @@
 function CallSite(receiver, fun, pos, strict_mode) {
   // For wasm frames, receiver is the wasm object and fun is the function index
   // instead of an actual function.
-  if (!IS_FUNCTION(fun) && !IS_NUMBER(fun)) {
-    throw MakeTypeError(kCallSiteExpectsFunction, typeof fun);
+  if (!IS_FUNCTION(fun) && !%IsWasmObject(receiver)) {
+    throw MakeTypeError(kCallSiteExpectsFunction, typeof receiver, typeof fun);
   }
 
   if (IS_UNDEFINED(new.target)) {
@@ -630,12 +346,6 @@
 function CallSiteGetFunctionName() {
   // See if the function knows its own name
   CheckCallSite(this, "getFunctionName");
-  if (HAS_PRIVATE(this, callSiteWasmObjectSymbol)) {
-    var wasm = GET_PRIVATE(this, callSiteWasmObjectSymbol);
-    var func_index = GET_PRIVATE(this, callSiteWasmFunctionIndexSymbol);
-    if (IS_UNDEFINED(wasm)) return "<WASM>";
-    return %WasmGetFunctionName(wasm, func_index);
-  }
   return %CallSiteGetFunctionNameRT(this);
 }
 
@@ -679,7 +389,8 @@
     var funName = this.getFunctionName();
     var funcIndex = GET_PRIVATE(this, callSiteWasmFunctionIndexSymbol);
     var pos = this.getPosition();
-    return funName + " (<WASM>:" + funcIndex + ":" + pos + ")";
+    if (IS_NULL(funName)) funName = "<WASM UNNAMED>";
+    return funName + " (<WASM>[" + funcIndex + "]+" + pos + ")";
   }
 
   var fileName;
diff --git a/src/js/prologue.js b/src/js/prologue.js
index b352eb1..79fe9b7 100644
--- a/src/js/prologue.js
+++ b/src/js/prologue.js
@@ -266,9 +266,6 @@
     imports_from_experimental(exports_container);
   }
 
-  utils.CreateDoubleResultArray();
-  utils.CreateDoubleResultArray = UNDEFINED;
-
   utils.Export = UNDEFINED;
   utils.PostDebug = UNDEFINED;
   utils.PostExperimentals = UNDEFINED;
@@ -281,9 +278,6 @@
     imports(exports_container);
   }
 
-  utils.CreateDoubleResultArray();
-  utils.CreateDoubleResultArray = UNDEFINED;
-
   exports_container = UNDEFINED;
 
   utils.Export = UNDEFINED;
diff --git a/src/js/promise.js b/src/js/promise.js
index 42b772b..5e8c460 100644
--- a/src/js/promise.js
+++ b/src/js/promise.js
@@ -21,10 +21,13 @@
     utils.ImportNow("promise_reject_reactions_symbol");
 var promiseFulfillReactionsSymbol =
     utils.ImportNow("promise_fulfill_reactions_symbol");
+var promiseDeferredReactionsSymbol =
+    utils.ImportNow("promise_deferred_reactions_symbol");
 var promiseRawSymbol = utils.ImportNow("promise_raw_symbol");
 var promiseStateSymbol = utils.ImportNow("promise_state_symbol");
 var promiseResultSymbol = utils.ImportNow("promise_result_symbol");
 var SpeciesConstructor;
+var speciesSymbol = utils.ImportNow("species_symbol");
 var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
 
 utils.Import(function(from) {
@@ -51,7 +54,7 @@
   var resolve = value => {
     if (alreadyResolved === true) return;
     alreadyResolved = true;
-    FulfillPromise(promise, value);
+    ResolvePromise(promise, value);
   };
 
   // ES#sec-promise-reject-functions
@@ -72,25 +75,25 @@
 
 // ES#sec-promise-executor
 // Promise ( executor )
-var GlobalPromise = function Promise(resolver) {
-  if (resolver === promiseRawSymbol) {
+var GlobalPromise = function Promise(executor) {
+  if (executor === promiseRawSymbol) {
     return %_NewObject(GlobalPromise, new.target);
   }
   if (IS_UNDEFINED(new.target)) throw MakeTypeError(kNotAPromise, this);
-  if (!IS_CALLABLE(resolver)) {
-    throw MakeTypeError(kResolverNotAFunction, resolver);
+  if (!IS_CALLABLE(executor)) {
+    throw MakeTypeError(kResolverNotAFunction, executor);
   }
 
   var promise = PromiseInit(%_NewObject(GlobalPromise, new.target));
   var callbacks = CreateResolvingFunctions(promise);
-
+  var debug_is_active = DEBUG_IS_ACTIVE;
   try {
-    %DebugPushPromise(promise, Promise);
-    resolver(callbacks.resolve, callbacks.reject);
+    if (debug_is_active) %DebugPushPromise(promise, Promise);
+    executor(callbacks.resolve, callbacks.reject);
   } catch (e) {
     %_Call(callbacks.reject, UNDEFINED, e);
   } finally {
-    %DebugPopPromise();
+    if (debug_is_active) %DebugPopPromise();
   }
 
   return promise;
@@ -98,11 +101,33 @@
 
 // Core functionality.
 
-function PromiseSet(promise, status, value, onResolve, onReject) {
+function PromiseSet(promise, status, value) {
   SET_PRIVATE(promise, promiseStateSymbol, status);
   SET_PRIVATE(promise, promiseResultSymbol, value);
-  SET_PRIVATE(promise, promiseFulfillReactionsSymbol, onResolve);
-  SET_PRIVATE(promise, promiseRejectReactionsSymbol, onReject);
+
+  // There are 3 possible states for the resolve, reject symbols when we add
+  // a new callback --
+  // 1) UNDEFINED -- This is the zero state where there is no callback
+  // registered. When we see this state, we directly attach the callbacks to
+  // the symbol.
+  // 2) !IS_ARRAY -- There is a single callback directly attached to the
+  // symbols. We need to create a new array to store additional callbacks.
+  // 3) IS_ARRAY -- There are multiple callbacks already registered,
+  // therefore we can just push the new callback to the existing array.
+  SET_PRIVATE(promise, promiseFulfillReactionsSymbol, UNDEFINED);
+  SET_PRIVATE(promise, promiseRejectReactionsSymbol, UNDEFINED);
+
+  // There are 2 possible states for this symbol --
+  // 1) UNDEFINED -- This is the zero state, no deferred object is
+  // attached to this symbol. When we want to add a new deferred we
+  // directly attach it to this symbol.
+  // 2) symbol with attached deferred object -- New deferred objects
+  // are not attached to this symbol, but instead they are directly
+  // attached to the resolve, reject callback arrays. At this point,
+  // the deferred symbol's state is stale, and the deferreds should be
+  // read from the reject, resolve callbacks.
+  SET_PRIVATE(promise, promiseDeferredReactionsSymbol, UNDEFINED);
+
   return promise;
 }
 
@@ -114,38 +139,46 @@
 }
 
 function PromiseInit(promise) {
-  return PromiseSet(
-      promise, kPending, UNDEFINED, new InternalArray, new InternalArray)
+  return PromiseSet(promise, kPending, UNDEFINED);
 }
 
-function PromiseDone(promise, status, value, promiseQueue) {
+function FulfillPromise(promise, status, value, promiseQueue) {
   if (GET_PRIVATE(promise, promiseStateSymbol) === kPending) {
     var tasks = GET_PRIVATE(promise, promiseQueue);
-    if (tasks.length) PromiseEnqueue(value, tasks, status);
+    if (!IS_UNDEFINED(tasks)) {
+      var tasks = GET_PRIVATE(promise, promiseQueue);
+      var deferreds = GET_PRIVATE(promise, promiseDeferredReactionsSymbol);
+      PromiseEnqueue(value, tasks, deferreds, status);
+    }
     PromiseSet(promise, status, value);
   }
 }
 
 function PromiseHandle(value, handler, deferred) {
+  var debug_is_active = DEBUG_IS_ACTIVE;
   try {
-    %DebugPushPromise(deferred.promise, PromiseHandle);
+    if (debug_is_active) %DebugPushPromise(deferred.promise, PromiseHandle);
     var result = handler(value);
     deferred.resolve(result);
   } catch (exception) {
     try { deferred.reject(exception); } catch (e) { }
   } finally {
-    %DebugPopPromise();
+    if (debug_is_active) %DebugPopPromise();
   }
 }
 
-function PromiseEnqueue(value, tasks, status) {
+function PromiseEnqueue(value, tasks, deferreds, status) {
   var id, name, instrumenting = DEBUG_IS_ACTIVE;
   %EnqueueMicrotask(function() {
     if (instrumenting) {
       %DebugAsyncTaskEvent({ type: "willHandle", id: id, name: name });
     }
-    for (var i = 0; i < tasks.length; i += 2) {
-      PromiseHandle(value, tasks[i], tasks[i + 1])
+    if (IS_ARRAY(tasks)) {
+      for (var i = 0; i < tasks.length; i += 2) {
+        PromiseHandle(value, tasks[i], tasks[i + 1]);
+      }
+    } else {
+      PromiseHandle(value, tasks, deferreds);
     }
     if (instrumenting) {
       %DebugAsyncTaskEvent({ type: "didHandle", id: id, name: name });
@@ -158,6 +191,33 @@
   }
 }
 
+function PromiseAttachCallbacks(promise, deferred, onResolve, onReject) {
+  var maybeResolveCallbacks =
+      GET_PRIVATE(promise, promiseFulfillReactionsSymbol);
+  if (IS_UNDEFINED(maybeResolveCallbacks)) {
+    SET_PRIVATE(promise, promiseFulfillReactionsSymbol, onResolve);
+    SET_PRIVATE(promise, promiseRejectReactionsSymbol, onReject);
+    SET_PRIVATE(promise, promiseDeferredReactionsSymbol, deferred);
+  } else if (!IS_ARRAY(maybeResolveCallbacks)) {
+    var resolveCallbacks = new InternalArray();
+    var rejectCallbacks = new InternalArray();
+    var existingDeferred = GET_PRIVATE(promise, promiseDeferredReactionsSymbol);
+
+    resolveCallbacks.push(
+        maybeResolveCallbacks, existingDeferred, onResolve, deferred);
+    rejectCallbacks.push(GET_PRIVATE(promise, promiseRejectReactionsSymbol),
+                         existingDeferred,
+                         onReject,
+                         deferred);
+
+    SET_PRIVATE(promise, promiseFulfillReactionsSymbol, resolveCallbacks);
+    SET_PRIVATE(promise, promiseRejectReactionsSymbol, rejectCallbacks);
+  } else {
+    maybeResolveCallbacks.push(onResolve, deferred);
+    GET_PRIVATE(promise, promiseRejectReactionsSymbol).push(onReject, deferred);
+  }
+}
+
 function PromiseIdResolveHandler(x) { return x }
 function PromiseIdRejectHandler(r) { throw r }
 
@@ -177,29 +237,58 @@
   return new GlobalPromise(PromiseNopResolver)
 }
 
-// ES#sec-fulfillpromise
-// FulfillPromise ( promise, value)
-function FulfillPromise(promise, x) {
-  if (x === promise) {
-    return RejectPromise(promise, MakeTypeError(kPromiseCyclic, x));
+// ES#sec-promise-resolve-functions
+// Promise Resolve Functions, steps 6-13
+function ResolvePromise(promise, resolution) {
+  if (resolution === promise) {
+    return RejectPromise(promise, MakeTypeError(kPromiseCyclic, resolution));
   }
-  if (IS_RECEIVER(x)) {
+  if (IS_RECEIVER(resolution)) {
     // 25.4.1.3.2 steps 8-12
     try {
-      var then = x.then;
+      var then = resolution.then;
     } catch (e) {
       return RejectPromise(promise, e);
     }
+
+    // If the resolution is a native promise that is already resolved
+    // or rejected, short-circuit the resolution procedure by directly
+    // reusing the value from that promise.
+    if (IsPromise(resolution) && then === PromiseThen) {
+      var thenableState = GET_PRIVATE(resolution, promiseStateSymbol);
+      if (thenableState === kFulfilled) {
+        // This goes inside the if-else to save one symbol lookup in
+        // the slow path.
+        var thenableValue = GET_PRIVATE(resolution, promiseResultSymbol);
+        FulfillPromise(promise, kFulfilled, thenableValue,
+                       promiseFulfillReactionsSymbol);
+        SET_PRIVATE(promise, promiseHasHandlerSymbol, true);
+        return;
+      } else if (thenableState === kRejected) {
+        var thenableValue = GET_PRIVATE(resolution, promiseResultSymbol);
+        if (!HAS_DEFINED_PRIVATE(resolution, promiseHasHandlerSymbol)) {
+          // Promise has already been rejected, but had no handler.
+          // Revoke previously triggered reject event.
+          %PromiseRevokeReject(resolution);
+        }
+        RejectPromise(promise, thenableValue);
+        SET_PRIVATE(resolution, promiseHasHandlerSymbol, true);
+        return;
+      }
+    }
+
     if (IS_CALLABLE(then)) {
       // PromiseResolveThenableJob
-      var id, name, instrumenting = DEBUG_IS_ACTIVE;
+      var id;
+      var name = "PromiseResolveThenableJob";
+      var instrumenting = DEBUG_IS_ACTIVE;
       %EnqueueMicrotask(function() {
         if (instrumenting) {
           %DebugAsyncTaskEvent({ type: "willHandle", id: id, name: name });
         }
         var callbacks = CreateResolvingFunctions(promise);
         try {
-          %_Call(then, x, callbacks.resolve, callbacks.reject);
+          %_Call(then, resolution, callbacks.resolve, callbacks.reject);
         } catch (e) {
           %_Call(callbacks.reject, UNDEFINED, e);
         }
@@ -209,28 +298,27 @@
       });
       if (instrumenting) {
         id = ++lastMicrotaskId;
-        name = "PromseResolveThenableJob";
         %DebugAsyncTaskEvent({ type: "enqueue", id: id, name: name });
       }
       return;
     }
   }
-  PromiseDone(promise, kFulfilled, x, promiseFulfillReactionsSymbol);
+  FulfillPromise(promise, kFulfilled, resolution, promiseFulfillReactionsSymbol);
 }
 
 // ES#sec-rejectpromise
 // RejectPromise ( promise, reason )
-function RejectPromise(promise, r) {
+function RejectPromise(promise, reason) {
   // Check promise status to confirm that this reject has an effect.
   // Call runtime for callbacks to the debugger or for unhandled reject.
   if (GET_PRIVATE(promise, promiseStateSymbol) === kPending) {
     var debug_is_active = DEBUG_IS_ACTIVE;
     if (debug_is_active ||
         !HAS_DEFINED_PRIVATE(promise, promiseHasHandlerSymbol)) {
-      %PromiseRejectEvent(promise, r, debug_is_active);
+      %PromiseRejectEvent(promise, reason, debug_is_active);
     }
   }
-  PromiseDone(promise, kRejected, r, promiseRejectReactionsSymbol)
+  FulfillPromise(promise, kRejected, reason, promiseRejectReactionsSymbol)
 }
 
 // ES#sec-newpromisecapability
@@ -318,14 +406,11 @@
   var deferred = NewPromiseCapability(constructor);
   switch (status) {
     case kPending:
-      GET_PRIVATE(this, promiseFulfillReactionsSymbol).push(onResolve,
-                                                            deferred);
-      GET_PRIVATE(this, promiseRejectReactionsSymbol).push(onReject, deferred);
+      PromiseAttachCallbacks(this, deferred, onResolve, onReject);
       break;
     case kFulfilled:
       PromiseEnqueue(GET_PRIVATE(this, promiseResultSymbol),
-                     [onResolve, deferred],
-                     kFulfilled);
+                     onResolve, deferred, kFulfilled);
       break;
     case kRejected:
       if (!HAS_DEFINED_PRIVATE(this, promiseHasHandlerSymbol)) {
@@ -334,8 +419,7 @@
         %PromiseRevokeReject(this);
       }
       PromiseEnqueue(GET_PRIVATE(this, promiseResultSymbol),
-                     [onReject, deferred],
-                     kRejected);
+                     onReject, deferred, kRejected);
       break;
   }
   // Mark this promise as having handler.
@@ -444,20 +528,30 @@
 
 // Utility for debugger
 
+function PromiseHasUserDefinedRejectHandlerCheck(handler, deferred) {
+  if (handler !== PromiseIdRejectHandler) {
+    var combinedDeferred = GET_PRIVATE(handler, promiseCombinedDeferredSymbol);
+    if (IS_UNDEFINED(combinedDeferred)) return true;
+    if (PromiseHasUserDefinedRejectHandlerRecursive(combinedDeferred.promise)) {
+      return true;
+    }
+  } else if (PromiseHasUserDefinedRejectHandlerRecursive(deferred.promise)) {
+    return true;
+  }
+  return false;
+}
+
 function PromiseHasUserDefinedRejectHandlerRecursive(promise) {
   var queue = GET_PRIVATE(promise, promiseRejectReactionsSymbol);
+  var deferreds = GET_PRIVATE(promise, promiseDeferredReactionsSymbol);
   if (IS_UNDEFINED(queue)) return false;
-  for (var i = 0; i < queue.length; i += 2) {
-    var handler = queue[i];
-    if (handler !== PromiseIdRejectHandler) {
-      var deferred = GET_PRIVATE(handler, promiseCombinedDeferredSymbol);
-      if (IS_UNDEFINED(deferred)) return true;
-      if (PromiseHasUserDefinedRejectHandlerRecursive(deferred.promise)) {
+  if (!IS_ARRAY(queue)) {
+    return PromiseHasUserDefinedRejectHandlerCheck(queue, deferreds);
+  } else {
+    for (var i = 0; i < queue.length; i += 2) {
+      if (PromiseHasUserDefinedRejectHandlerCheck(queue[i], queue[i + 1])) {
         return true;
       }
-    } else if (PromiseHasUserDefinedRejectHandlerRecursive(
-                   queue[i + 1].promise)) {
-      return true;
     }
   }
   return false;
@@ -470,6 +564,11 @@
   return PromiseHasUserDefinedRejectHandlerRecursive(this);
 };
 
+
+function PromiseSpecies() {
+  return this;
+}
+
 // -------------------------------------------------------------------
 // Install exported functions.
 
@@ -484,6 +583,8 @@
   "resolve", PromiseResolve
 ]);
 
+utils.InstallGetter(GlobalPromise, speciesSymbol, PromiseSpecies);
+
 utils.InstallFunctions(GlobalPromise.prototype, DONT_ENUM, [
   "then", PromiseThen,
   "catch", PromiseCatch
@@ -495,7 +596,7 @@
   "promise_create", PromiseCreate,
   "promise_has_user_defined_reject_handler", PromiseHasUserDefinedRejectHandler,
   "promise_reject", RejectPromise,
-  "promise_resolve", FulfillPromise,
+  "promise_resolve", ResolvePromise,
   "promise_then", PromiseThen,
   "promise_create_rejected", PromiseCreateRejected,
   "promise_create_resolved", PromiseCreateResolved
@@ -506,7 +607,7 @@
 // promise without having to hold on to those closures forever.
 utils.InstallFunctions(extrasUtils, 0, [
   "createPromise", PromiseCreate,
-  "resolvePromise", FulfillPromise,
+  "resolvePromise", ResolvePromise,
   "rejectPromise", RejectPromise
 ]);
 
diff --git a/src/js/regexp.js b/src/js/regexp.js
index 719a081..6b7cf48 100644
--- a/src/js/regexp.js
+++ b/src/js/regexp.js
@@ -22,6 +22,7 @@
 var matchSymbol = utils.ImportNow("match_symbol");
 var replaceSymbol = utils.ImportNow("replace_symbol");
 var searchSymbol = utils.ImportNow("search_symbol");
+var speciesSymbol = utils.ImportNow("species_symbol");
 var splitSymbol = utils.ImportNow("split_symbol");
 var SpeciesConstructor;
 
@@ -323,10 +324,10 @@
     // not a '?'.  But see https://code.google.com/p/v8/issues/detail?id=3560
     var regexp = this;
     var source = REGEXP_SOURCE(regexp);
-    if (regexp.length >= 3 &&
-        %_StringCharCodeAt(regexp, 0) == 46 &&  // '.'
-        %_StringCharCodeAt(regexp, 1) == 42 &&  // '*'
-        %_StringCharCodeAt(regexp, 2) != 63) {  // '?'
+    if (source.length >= 3 &&
+        %_StringCharCodeAt(source, 0) == 46 &&  // '.'
+        %_StringCharCodeAt(source, 1) == 42 &&  // '*'
+        %_StringCharCodeAt(source, 2) != 63) {  // '?'
       regexp = TrimRegExp(regexp);
     }
     // matchIndices is either null or the RegExpLastMatchInfo array.
@@ -537,22 +538,6 @@
 %FunctionRemovePrototype(RegExpSubclassSplit);
 
 
-// Legacy implementation of RegExp.prototype[Symbol.match] which
-// doesn't properly call the underlying exec method
-function RegExpMatch(string) {
-  if (!IS_REGEXP(this)) {
-    throw MakeTypeError(kIncompatibleMethodReceiver,
-                        "RegExp.prototype.@@match", this);
-  }
-  var subject = TO_STRING(string);
-
-  if (!REGEXP_GLOBAL(this)) return RegExpExecNoTests(this, subject, 0);
-  this.lastIndex = 0;
-  var result = %StringMatch(subject, this, RegExpLastMatchInfo);
-  return result;
-}
-
-
 // ES#sec-regexp.prototype-@@match
 // RegExp.prototype [ @@match ] ( string )
 function RegExpSubclassMatch(string) {
@@ -952,19 +937,6 @@
 %FunctionRemovePrototype(RegExpSubclassReplace);
 
 
-// Legacy implementation of RegExp.prototype[Symbol.search] which
-// doesn't properly use the overridden exec method
-function RegExpSearch(string) {
-  if (!IS_REGEXP(this)) {
-    throw MakeTypeError(kIncompatibleMethodReceiver,
-                        "RegExp.prototype.@@search", this);
-  }
-  var match = DoRegExpExec(this, TO_STRING(string), 0);
-  if (match) return match[CAPTURE0];
-  return -1;
-}
-
-
 // ES#sec-regexp.prototype-@@search
 // RegExp.prototype [ @@search ] ( string )
 function RegExpSubclassSearch(string) {
@@ -1132,6 +1104,27 @@
 }
 %SetForceInlineFlag(RegExpGetSticky);
 
+
+// ES6 21.2.5.15.
+function RegExpGetUnicode() {
+  if (!IS_REGEXP(this)) {
+    // TODO(littledan): Remove this RegExp compat workaround
+    if (this === GlobalRegExpPrototype) {
+      %IncrementUseCounter(kRegExpPrototypeUnicodeGetter);
+      return UNDEFINED;
+    }
+    throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.unicode");
+  }
+  return TO_BOOLEAN(REGEXP_UNICODE(this));
+}
+%SetForceInlineFlag(RegExpGetUnicode);
+
+
+function RegExpSpecies() {
+  return this;
+}
+
+
 // -------------------------------------------------------------------
 
 %FunctionSetInstanceClassName(GlobalRegExp, 'RegExp');
@@ -1141,15 +1134,17 @@
     GlobalRegExp.prototype, 'constructor', GlobalRegExp, DONT_ENUM);
 %SetCode(GlobalRegExp, RegExpConstructor);
 
+utils.InstallGetter(GlobalRegExp, speciesSymbol, RegExpSpecies);
+
 utils.InstallFunctions(GlobalRegExp.prototype, DONT_ENUM, [
-  "exec", RegExpExecJS,
-  "test", RegExpTest,
+  "exec", RegExpSubclassExecJS,
+  "test", RegExpSubclassTest,
   "toString", RegExpToString,
   "compile", RegExpCompileJS,
-  matchSymbol, RegExpMatch,
-  replaceSymbol, RegExpReplace,
-  searchSymbol, RegExpSearch,
-  splitSymbol, RegExpSplit,
+  matchSymbol, RegExpSubclassMatch,
+  replaceSymbol, RegExpSubclassReplace,
+  searchSymbol, RegExpSubclassSearch,
+  splitSymbol, RegExpSubclassSplit,
 ]);
 
 utils.InstallGetter(GlobalRegExp.prototype, 'flags', RegExpGetFlags);
@@ -1158,6 +1153,7 @@
 utils.InstallGetter(GlobalRegExp.prototype, 'multiline', RegExpGetMultiline);
 utils.InstallGetter(GlobalRegExp.prototype, 'source', RegExpGetSource);
 utils.InstallGetter(GlobalRegExp.prototype, 'sticky', RegExpGetSticky);
+utils.InstallGetter(GlobalRegExp.prototype, 'unicode', RegExpGetUnicode);
 
 // The properties `input` and `$_` are aliases for each other.  When this
 // value is set the value it is set to is coerced to a string.
@@ -1232,12 +1228,6 @@
   to.RegExpExec = DoRegExpExec;
   to.RegExpInitialize = RegExpInitialize;
   to.RegExpLastMatchInfo = RegExpLastMatchInfo;
-  to.RegExpSubclassExecJS = RegExpSubclassExecJS;
-  to.RegExpSubclassMatch = RegExpSubclassMatch;
-  to.RegExpSubclassReplace = RegExpSubclassReplace;
-  to.RegExpSubclassSearch = RegExpSubclassSearch;
-  to.RegExpSubclassSplit = RegExpSubclassSplit;
-  to.RegExpSubclassTest = RegExpSubclassTest;
   to.RegExpTest = RegExpTest;
 });
 
diff --git a/src/js/runtime.js b/src/js/runtime.js
index a6a0b4d..216685f 100644
--- a/src/js/runtime.js
+++ b/src/js/runtime.js
@@ -16,7 +16,6 @@
 
 %CheckIsBootstrapping();
 
-var FLAG_harmony_species;
 var GlobalArray = global.Array;
 var GlobalBoolean = global.Boolean;
 var GlobalString = global.String;
@@ -30,10 +29,6 @@
   speciesSymbol = from.species_symbol;
 });
 
-utils.ImportFromExperimental(function(from) {
-  FLAG_harmony_species = from.FLAG_harmony_species;
-});
-
 // ----------------------------------------------------------------------------
 
 
@@ -44,7 +39,7 @@
 
 
 function ToPositiveInteger(x, rangeErrorIndex) {
-  var i = TO_INTEGER_MAP_MINUS_ZERO(x);
+  var i = TO_INTEGER(x) + 0;
   if (i < 0) throw MakeRangeError(rangeErrorIndex);
   return i;
 }
@@ -65,35 +60,22 @@
 
 
 // ES2015 7.3.20
-// For the fallback with --harmony-species off, there are two possible choices:
-//  - "conservative": return defaultConstructor
-//  - "not conservative": return object.constructor
-// This fallback path is only needed in the transition to ES2015, and the
-// choice is made simply to preserve the previous behavior so that we don't
-// have a three-step upgrade: old behavior, unspecified intermediate behavior,
-// and ES2015.
-// In some cases, we were "conservative" (e.g., ArrayBuffer, RegExp), and in
-// other cases we were "not conservative (e.g., TypedArray, Promise).
-function SpeciesConstructor(object, defaultConstructor, conservative) {
-  if (FLAG_harmony_species) {
-    var constructor = object.constructor;
-    if (IS_UNDEFINED(constructor)) {
-      return defaultConstructor;
-    }
-    if (!IS_RECEIVER(constructor)) {
-      throw MakeTypeError(kConstructorNotReceiver);
-    }
-    var species = constructor[speciesSymbol];
-    if (IS_NULL_OR_UNDEFINED(species)) {
-      return defaultConstructor;
-    }
-    if (%IsConstructor(species)) {
-      return species;
-    }
-    throw MakeTypeError(kSpeciesNotConstructor);
-  } else {
-    return conservative ? defaultConstructor : object.constructor;
+function SpeciesConstructor(object, defaultConstructor) {
+  var constructor = object.constructor;
+  if (IS_UNDEFINED(constructor)) {
+    return defaultConstructor;
   }
+  if (!IS_RECEIVER(constructor)) {
+    throw MakeTypeError(kConstructorNotReceiver);
+  }
+  var species = constructor[speciesSymbol];
+  if (IS_NULL_OR_UNDEFINED(species)) {
+    return defaultConstructor;
+  }
+  if (%IsConstructor(species)) {
+    return species;
+  }
+  throw MakeTypeError(kSpeciesNotConstructor);
 }
 
 //----------------------------------------------------------------------------
diff --git a/src/js/string.js b/src/js/string.js
index badb2b4..d2eaa32 100644
--- a/src/js/string.js
+++ b/src/js/string.js
@@ -13,8 +13,6 @@
 var ArrayJoin;
 var GlobalRegExp = global.RegExp;
 var GlobalString = global.String;
-var InternalArray = utils.InternalArray;
-var InternalPackedArray = utils.InternalPackedArray;
 var IsRegExp;
 var MakeRangeError;
 var MakeTypeError;
@@ -520,25 +518,6 @@
   return %StringToUpperCase(TO_STRING(this));
 }
 
-// ES5, 15.5.4.20
-function StringTrimJS() {
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.trim");
-
-  return %StringTrim(TO_STRING(this), true, true);
-}
-
-function StringTrimLeft() {
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.trimLeft");
-
-  return %StringTrim(TO_STRING(this), true, false);
-}
-
-function StringTrimRight() {
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.trimRight");
-
-  return %StringTrim(TO_STRING(this), false, true);
-}
-
 
 // ES6 draft, revision 26 (2014-07-18), section B.2.3.2.1
 function HtmlEscape(str) {
@@ -768,33 +747,6 @@
 }
 
 
-// ES6 Draft 05-22-2014, section 21.1.2.2
-function StringFromCodePoint(_) {  // length = 1
-  "use strict";
-  var code;
-  var length = arguments.length;
-  var index;
-  var result = "";
-  for (index = 0; index < length; index++) {
-    code = arguments[index];
-    if (!%_IsSmi(code)) {
-      code = TO_NUMBER(code);
-    }
-    if (code < 0 || code > 0x10FFFF || code !== TO_INTEGER(code)) {
-      throw MakeRangeError(kInvalidCodePoint, code);
-    }
-    if (code <= 0xFFFF) {
-      result += %_StringCharFromCode(code);
-    } else {
-      code -= 0x10000;
-      result += %_StringCharFromCode((code >>> 10) & 0x3FF | 0xD800);
-      result += %_StringCharFromCode(code & 0x3FF | 0xDC00);
-    }
-  }
-  return result;
-}
-
-
 // -------------------------------------------------------------------
 // String methods related to templates
 
@@ -823,7 +775,6 @@
 
 // Set up the non-enumerable functions on the String object.
 utils.InstallFunctions(GlobalString, DONT_ENUM, [
-  "fromCodePoint", StringFromCodePoint,
   "raw", StringRaw
 ]);
 
@@ -852,9 +803,6 @@
   "toLocaleLowerCase", StringToLocaleLowerCase,
   "toUpperCase", StringToUpperCaseJS,
   "toLocaleUpperCase", StringToLocaleUpperCase,
-  "trim", StringTrimJS,
-  "trimLeft", StringTrimLeft,
-  "trimRight", StringTrimRight,
 
   "link", StringLink,
   "anchor", StringAnchor,
diff --git a/src/js/symbol.js b/src/js/symbol.js
index 7365655..2e7cc53 100644
--- a/src/js/symbol.js
+++ b/src/js/symbol.js
@@ -17,6 +17,11 @@
     utils.ImportNow("is_concat_spreadable_symbol");
 var iteratorSymbol = utils.ImportNow("iterator_symbol");
 var MakeTypeError;
+var matchSymbol = utils.ImportNow("match_symbol");
+var replaceSymbol = utils.ImportNow("replace_symbol");
+var searchSymbol = utils.ImportNow("search_symbol");
+var speciesSymbol = utils.ImportNow("species_symbol");
+var splitSymbol = utils.ImportNow("split_symbol");
 var toPrimitiveSymbol = utils.ImportNow("to_primitive_symbol");
 var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
 var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
@@ -78,11 +83,11 @@
   "hasInstance", hasInstanceSymbol,
   "isConcatSpreadable", isConcatSpreadableSymbol,
   "iterator", iteratorSymbol,
-  // TODO(yangguo): expose when implemented.
-  // "match", matchSymbol,
-  // "replace", replaceSymbol,
-  // "search", searchSymbol,
-  // "split, splitSymbol,
+  "match", matchSymbol,
+  "replace", replaceSymbol,
+  "search", searchSymbol,
+  "species", speciesSymbol,
+  "split", splitSymbol,
   "toPrimitive", toPrimitiveSymbol,
   "toStringTag", toStringTagSymbol,
   "unscopables", unscopablesSymbol,
diff --git a/src/js/typedarray.js b/src/js/typedarray.js
index 18f6dde..8e7d9ee 100644
--- a/src/js/typedarray.js
+++ b/src/js/typedarray.js
@@ -47,6 +47,7 @@
 var SpeciesConstructor;
 var ToPositiveInteger;
 var iteratorSymbol = utils.ImportNow("iterator_symbol");
+var speciesSymbol = utils.ImportNow("species_symbol");
 var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
 
 macro TYPED_ARRAYS(FUNCTION)
@@ -68,6 +69,8 @@
 
 TYPED_ARRAYS(DECLARE_GLOBALS)
 
+var GlobalTypedArray = %object_get_prototype_of(GlobalUint8Array);
+
 utils.Import(function(from) {
   ArrayValues = from.ArrayValues;
   GetIterator = from.GetIterator;
@@ -329,42 +332,6 @@
 }
 %SetForceInlineFlag(TypedArraySubArray);
 
-function TypedArrayGetBuffer() {
-  if (!IS_TYPEDARRAY(this)) {
-    throw MakeTypeError(kIncompatibleMethodReceiver,
-                        "get TypedArray.prototype.buffer", this);
-  }
-  return %TypedArrayGetBuffer(this);
-}
-%SetForceInlineFlag(TypedArrayGetBuffer);
-
-function TypedArrayGetByteLength() {
-  if (!IS_TYPEDARRAY(this)) {
-    throw MakeTypeError(kIncompatibleMethodReceiver,
-                        "get TypedArray.prototype.byteLength", this);
-  }
-  return %_ArrayBufferViewGetByteLength(this);
-}
-%SetForceInlineFlag(TypedArrayGetByteLength);
-
-function TypedArrayGetByteOffset() {
-  if (!IS_TYPEDARRAY(this)) {
-    throw MakeTypeError(kIncompatibleMethodReceiver,
-                        "get TypedArray.prototype.byteOffset", this);
-  }
-  return %_ArrayBufferViewGetByteOffset(this);
-}
-%SetForceInlineFlag(TypedArrayGetByteOffset);
-
-function TypedArrayGetLength() {
-  if (!IS_TYPEDARRAY(this)) {
-    throw MakeTypeError(kIncompatibleMethodReceiver,
-                        "get TypedArray.prototype.length", this);
-  }
-  return %_TypedArrayGetLength(this);
-}
-%SetForceInlineFlag(TypedArrayGetLength);
-
 
 
 function TypedArraySetFromArrayLike(target, source, sourceLength, offset) {
@@ -383,7 +350,7 @@
 function TypedArraySetFromOverlappingTypedArray(target, source, offset) {
   var sourceElementSize = source.BYTES_PER_ELEMENT;
   var targetElementSize = target.BYTES_PER_ELEMENT;
-  var sourceLength = source.length;
+  var sourceLength = %_TypedArrayGetLength(source);
 
   // Copy left part.
   function CopyLeftPart() {
@@ -403,7 +370,7 @@
   }
   var leftIndex = CopyLeftPart();
 
-  // Copy rigth part;
+  // Copy right part.
   function CopyRightPart() {
     // First unmutated byte before the next write
     var targetPtr =
@@ -447,7 +414,8 @@
       TypedArraySetFromOverlappingTypedArray(this, obj, intOffset);
       return;
     case 2: // TYPED_ARRAY_SET_TYPED_ARRAY_NONOVERLAPPING
-      TypedArraySetFromArrayLike(this, obj, obj.length, intOffset);
+      TypedArraySetFromArrayLike(this,
+          obj, %_TypedArrayGetLength(obj), intOffset);
       return;
     case 3: // TYPED_ARRAY_SET_NON_TYPED_ARRAY
       var l = obj.length;
@@ -462,7 +430,7 @@
         return;
       }
       l = TO_LENGTH(l);
-      if (intOffset + l > this.length) {
+      if (intOffset + l > %_TypedArrayGetLength(this)) {
         throw MakeRangeError(kTypedArraySetSourceTooLarge);
       }
       TypedArraySetFromArrayLike(this, obj, l, intOffset);
@@ -808,34 +776,31 @@
 }
 %FunctionSetLength(TypedArrayFrom, 1);
 
-function TypedArray() {
+// TODO(bmeurer): Migrate this to a proper builtin.
+function TypedArrayConstructor() {
   if (IS_UNDEFINED(new.target)) {
     throw MakeTypeError(kConstructorNonCallable, "TypedArray");
   }
-  if (new.target === TypedArray) {
+  if (new.target === GlobalTypedArray) {
     throw MakeTypeError(kConstructAbstractClass, "TypedArray");
   }
 }
 
+function TypedArraySpecies() {
+  return this;
+}
+
 // -------------------------------------------------------------------
 
-%FunctionSetPrototype(TypedArray, new GlobalObject());
-%AddNamedProperty(TypedArray.prototype,
-                  "constructor", TypedArray, DONT_ENUM);
-utils.InstallFunctions(TypedArray, DONT_ENUM, [
+%SetCode(GlobalTypedArray, TypedArrayConstructor);
+utils.InstallFunctions(GlobalTypedArray, DONT_ENUM, [
   "from", TypedArrayFrom,
   "of", TypedArrayOf
 ]);
-utils.InstallGetter(TypedArray.prototype, "buffer", TypedArrayGetBuffer);
-utils.InstallGetter(TypedArray.prototype, "byteOffset", TypedArrayGetByteOffset,
-                    DONT_ENUM | DONT_DELETE);
-utils.InstallGetter(TypedArray.prototype, "byteLength",
-                    TypedArrayGetByteLength, DONT_ENUM | DONT_DELETE);
-utils.InstallGetter(TypedArray.prototype, "length", TypedArrayGetLength,
-                    DONT_ENUM | DONT_DELETE);
-utils.InstallGetter(TypedArray.prototype, toStringTagSymbol,
+utils.InstallGetter(GlobalTypedArray, speciesSymbol, TypedArraySpecies);
+utils.InstallGetter(GlobalTypedArray.prototype, toStringTagSymbol,
                     TypedArrayGetToStringTag);
-utils.InstallFunctions(TypedArray.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalTypedArray.prototype, DONT_ENUM, [
   "subarray", TypedArraySubArray,
   "set", TypedArraySet,
   "copyWithin", TypedArrayCopyWithin,
@@ -859,15 +824,15 @@
   "toLocaleString", TypedArrayToLocaleString
 ]);
 
-%AddNamedProperty(TypedArray.prototype, "toString", ArrayToString,
+%AddNamedProperty(GlobalTypedArray.prototype, "toString", ArrayToString,
                   DONT_ENUM);
 
 
 macro SETUP_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE)
   %SetCode(GlobalNAME, NAMEConstructor);
   %FunctionSetPrototype(GlobalNAME, new GlobalObject());
-  %InternalSetPrototype(GlobalNAME, TypedArray);
-  %InternalSetPrototype(GlobalNAME.prototype, TypedArray.prototype);
+  %InternalSetPrototype(GlobalNAME, GlobalTypedArray);
+  %InternalSetPrototype(GlobalNAME.prototype, GlobalTypedArray.prototype);
 
   %AddNamedProperty(GlobalNAME, "BYTES_PER_ELEMENT", ELEMENT_SIZE,
                     READ_ONLY | DONT_ENUM | DONT_DELETE);
@@ -883,29 +848,6 @@
 
 // --------------------------- DataView -----------------------------
 
-function DataViewGetBufferJS() {
-  if (!IS_DATAVIEW(this)) {
-    throw MakeTypeError(kIncompatibleMethodReceiver, 'DataView.buffer', this);
-  }
-  return %DataViewGetBuffer(this);
-}
-
-function DataViewGetByteOffset() {
-  if (!IS_DATAVIEW(this)) {
-    throw MakeTypeError(kIncompatibleMethodReceiver,
-                        'DataView.byteOffset', this);
-  }
-  return %_ArrayBufferViewGetByteOffset(this);
-}
-
-function DataViewGetByteLength() {
-  if (!IS_DATAVIEW(this)) {
-    throw MakeTypeError(kIncompatibleMethodReceiver,
-                        'DataView.byteLength', this);
-  }
-  return %_ArrayBufferViewGetByteLength(this);
-}
-
 macro DATA_VIEW_TYPES(FUNCTION)
   FUNCTION(Int8)
   FUNCTION(Uint8)
@@ -944,21 +886,6 @@
 
 DATA_VIEW_TYPES(DATA_VIEW_GETTER_SETTER)
 
-// Setup the DataView constructor.
-%FunctionSetPrototype(GlobalDataView, new GlobalObject);
-
-// Set up constructor property on the DataView prototype.
-%AddNamedProperty(GlobalDataView.prototype, "constructor", GlobalDataView,
-                  DONT_ENUM);
-%AddNamedProperty(GlobalDataView.prototype, toStringTagSymbol, "DataView",
-                  READ_ONLY|DONT_ENUM);
-
-utils.InstallGetter(GlobalDataView.prototype, "buffer", DataViewGetBufferJS);
-utils.InstallGetter(GlobalDataView.prototype, "byteOffset",
-                    DataViewGetByteOffset);
-utils.InstallGetter(GlobalDataView.prototype, "byteLength",
-                    DataViewGetByteLength);
-
 utils.InstallFunctions(GlobalDataView.prototype, DONT_ENUM, [
   "getInt8", DataViewGetInt8JS,
   "setInt8", DataViewSetInt8JS,
diff --git a/src/js/uri.js b/src/js/uri.js
deleted file mode 100644
index 19bfbd3..0000000
--- a/src/js/uri.js
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file contains support for URI manipulations written in
-// JavaScript.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-//- ------------------------------------------------------------------
-// Imports
-
-var GlobalObject = global.Object;
-var InternalArray = utils.InternalArray;
-var MakeURIError;
-
-utils.Import(function(from) {
-  MakeURIError = from.MakeURIError;
-});
-
-
-// -------------------------------------------------------------------
-// Define internal helper functions.
-
-function HexValueOf(code) {
-  // 0-9
-  if (code >= 48 && code <= 57) return code - 48;
-  // A-F
-  if (code >= 65 && code <= 70) return code - 55;
-  // a-f
-  if (code >= 97 && code <= 102) return code - 87;
-
-  return -1;
-}
-
-function URIHexCharsToCharCode(highChar, lowChar) {
-  var highCode = HexValueOf(highChar);
-  var lowCode = HexValueOf(lowChar);
-  if (highCode == -1 || lowCode == -1) throw MakeURIError();
-  return (highCode << 4) | lowCode;
-}
-
-// Callers must ensure that |result| is a sufficiently long sequential
-// two-byte string!
-function URIDecodeOctets(octets, result, index) {
-  var value;
-  var o0 = octets[0];
-  if (o0 < 0x80) {
-    value = o0;
-  } else if (o0 < 0xc2) {
-    throw MakeURIError();
-  } else {
-    var o1 = octets[1];
-    if (o0 < 0xe0) {
-      var a = o0 & 0x1f;
-      if ((o1 < 0x80) || (o1 > 0xbf)) throw MakeURIError();
-      var b = o1 & 0x3f;
-      value = (a << 6) + b;
-      if (value < 0x80 || value > 0x7ff) throw MakeURIError();
-    } else {
-      var o2 = octets[2];
-      if (o0 < 0xf0) {
-        var a = o0 & 0x0f;
-        if ((o1 < 0x80) || (o1 > 0xbf)) throw MakeURIError();
-        var b = o1 & 0x3f;
-        if ((o2 < 0x80) || (o2 > 0xbf)) throw MakeURIError();
-        var c = o2 & 0x3f;
-        value = (a << 12) + (b << 6) + c;
-        if ((value < 0x800) || (value > 0xffff)) throw MakeURIError();
-      } else {
-        var o3 = octets[3];
-        if (o0 < 0xf8) {
-          var a = (o0 & 0x07);
-          if ((o1 < 0x80) || (o1 > 0xbf)) throw MakeURIError();
-          var b = (o1 & 0x3f);
-          if ((o2 < 0x80) || (o2 > 0xbf)) {
-            throw MakeURIError();
-          }
-          var c = (o2 & 0x3f);
-          if ((o3 < 0x80) || (o3 > 0xbf)) throw MakeURIError();
-          var d = (o3 & 0x3f);
-          value = (a << 18) + (b << 12) + (c << 6) + d;
-          if ((value < 0x10000) || (value > 0x10ffff)) throw MakeURIError();
-        } else {
-          throw MakeURIError();
-        }
-      }
-    }
-  }
-  if (0xD800 <= value && value <= 0xDFFF) throw MakeURIError();
-  if (value < 0x10000) {
-    %_TwoByteSeqStringSetChar(index++, value, result);
-  } else {
-    %_TwoByteSeqStringSetChar(index++, (value >> 10) + 0xd7c0, result);
-    %_TwoByteSeqStringSetChar(index++, (value & 0x3ff) + 0xdc00, result);
-  }
-  return index;
-}
-
-// ECMA-262, section 15.1.3
-function Decode(uri, reserved) {
-  uri = TO_STRING(uri);
-  var uriLength = uri.length;
-  var one_byte = %NewString(uriLength, NEW_ONE_BYTE_STRING);
-  var index = 0;
-  var k = 0;
-
-  // Optimistically assume one-byte string.
-  for ( ; k < uriLength; k++) {
-    var code = %_StringCharCodeAt(uri, k);
-    if (code == 37) {  // '%'
-      if (k + 2 >= uriLength) throw MakeURIError();
-      var cc = URIHexCharsToCharCode(%_StringCharCodeAt(uri, k+1),
-                                     %_StringCharCodeAt(uri, k+2));
-      if (cc >> 7) break;  // Assumption wrong, two-byte string.
-      if (reserved(cc)) {
-        %_OneByteSeqStringSetChar(index++, 37, one_byte);  // '%'.
-        %_OneByteSeqStringSetChar(index++, %_StringCharCodeAt(uri, k+1),
-                                  one_byte);
-        %_OneByteSeqStringSetChar(index++, %_StringCharCodeAt(uri, k+2),
-                                  one_byte);
-      } else {
-        %_OneByteSeqStringSetChar(index++, cc, one_byte);
-      }
-      k += 2;
-    } else {
-      if (code > 0x7f) break;  // Assumption wrong, two-byte string.
-      %_OneByteSeqStringSetChar(index++, code, one_byte);
-    }
-  }
-
-  one_byte = %TruncateString(one_byte, index);
-  if (k == uriLength) return one_byte;
-
-  // Write into two byte string.
-  var two_byte = %NewString(uriLength - k, NEW_TWO_BYTE_STRING);
-  index = 0;
-
-  for ( ; k < uriLength; k++) {
-    var code = %_StringCharCodeAt(uri, k);
-    if (code == 37) {  // '%'
-      if (k + 2 >= uriLength) throw MakeURIError();
-      var cc = URIHexCharsToCharCode(%_StringCharCodeAt(uri, ++k),
-                                     %_StringCharCodeAt(uri, ++k));
-      if (cc >> 7) {
-        var n = 0;
-        while (((cc << ++n) & 0x80) != 0) { }
-        if (n == 1 || n > 4) throw MakeURIError();
-        var octets = new InternalArray(n);
-        octets[0] = cc;
-        if (k + 3 * (n - 1) >= uriLength) throw MakeURIError();
-        for (var i = 1; i < n; i++) {
-          if (uri[++k] != '%') throw MakeURIError();
-          octets[i] = URIHexCharsToCharCode(%_StringCharCodeAt(uri, ++k),
-                                            %_StringCharCodeAt(uri, ++k));
-        }
-        index = URIDecodeOctets(octets, two_byte, index);
-      } else  if (reserved(cc)) {
-        %_TwoByteSeqStringSetChar(index++, 37, two_byte);  // '%'.
-        %_TwoByteSeqStringSetChar(index++, %_StringCharCodeAt(uri, k - 1),
-                                  two_byte);
-        %_TwoByteSeqStringSetChar(index++, %_StringCharCodeAt(uri, k),
-                                  two_byte);
-      } else {
-        %_TwoByteSeqStringSetChar(index++, cc, two_byte);
-      }
-    } else {
-      %_TwoByteSeqStringSetChar(index++, code, two_byte);
-    }
-  }
-
-  two_byte = %TruncateString(two_byte, index);
-  return one_byte + two_byte;
-}
-
-// -------------------------------------------------------------------
-// Define exported functions.
-
-// ECMA-262 - B.2.1.
-function URIEscapeJS(s) {
-  return %URIEscape(s);
-}
-
-// ECMA-262 - B.2.2.
-function URIUnescapeJS(s) {
-  return %URIUnescape(s);
-}
-
-// ECMA-262 - 15.1.3.1.
-function URIDecode(uri) {
-  var reservedPredicate = function(cc) {
-    // #$
-    if (35 <= cc && cc <= 36) return true;
-    // &
-    if (cc == 38) return true;
-    // +,
-    if (43 <= cc && cc <= 44) return true;
-    // /
-    if (cc == 47) return true;
-    // :;
-    if (58 <= cc && cc <= 59) return true;
-    // =
-    if (cc == 61) return true;
-    // ?@
-    if (63 <= cc && cc <= 64) return true;
-
-    return false;
-  };
-  return Decode(uri, reservedPredicate);
-}
-
-// ECMA-262 - 15.1.3.2.
-function URIDecodeComponent(component) {
-  var reservedPredicate = function(cc) { return false; };
-  return Decode(component, reservedPredicate);
-}
-
-// -------------------------------------------------------------------
-// Install exported functions.
-
-// Set up non-enumerable URI functions on the global object and set
-// their names.
-utils.InstallFunctions(global, DONT_ENUM, [
-  "escape", URIEscapeJS,
-  "unescape", URIUnescapeJS,
-  "decodeURI", URIDecode,
-  "decodeURIComponent", URIDecodeComponent
-]);
-
-})
diff --git a/src/js/v8natives.js b/src/js/v8natives.js
index 44be941..9e437a3 100644
--- a/src/js/v8natives.js
+++ b/src/js/v8natives.js
@@ -12,7 +12,6 @@
 var GlobalArray = global.Array;
 var GlobalNumber = global.Number;
 var GlobalObject = global.Object;
-var InternalArray = utils.InternalArray;
 var iteratorSymbol = utils.ImportNow("iterator_symbol");
 var MakeRangeError;
 var MakeSyntaxError;
@@ -450,7 +449,6 @@
 utils.Export(function(to) {
   to.GetIterator = GetIterator;
   to.GetMethod = GetMethod;
-  to.IsFinite = GlobalIsFinite;
   to.IsNaN = GlobalIsNaN;
   to.NumberIsNaN = NumberIsNaN;
   to.NumberIsInteger = NumberIsInteger;
diff --git a/src/json-parser.cc b/src/json-parser.cc
new file mode 100644
index 0000000..bf2fd0d
--- /dev/null
+++ b/src/json-parser.cc
@@ -0,0 +1,812 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/json-parser.h"
+
+#include "src/char-predicates-inl.h"
+#include "src/conversions.h"
+#include "src/debug/debug.h"
+#include "src/factory.h"
+#include "src/field-type.h"
+#include "src/messages.h"
+#include "src/objects-inl.h"
+#include "src/parsing/scanner.h"
+#include "src/parsing/token.h"
+#include "src/property-descriptor.h"
+#include "src/transitions.h"
+
+namespace v8 {
+namespace internal {
+
+MaybeHandle<Object> JsonParseInternalizer::Internalize(Isolate* isolate,
+                                                       Handle<Object> object,
+                                                       Handle<Object> reviver) {
+  DCHECK(reviver->IsCallable());
+  JsonParseInternalizer internalizer(isolate,
+                                     Handle<JSReceiver>::cast(reviver));
+  Handle<JSObject> holder =
+      isolate->factory()->NewJSObject(isolate->object_function());
+  Handle<String> name = isolate->factory()->empty_string();
+  JSObject::AddProperty(holder, name, object, NONE);
+  return internalizer.InternalizeJsonProperty(holder, name);
+}
+
+MaybeHandle<Object> JsonParseInternalizer::InternalizeJsonProperty(
+    Handle<JSReceiver> holder, Handle<String> name) {
+  HandleScope outer_scope(isolate_);
+  Handle<Object> value;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate_, value, Object::GetPropertyOrElement(holder, name), Object);
+  if (value->IsJSReceiver()) {
+    Handle<JSReceiver> object = Handle<JSReceiver>::cast(value);
+    Maybe<bool> is_array = Object::IsArray(object);
+    if (is_array.IsNothing()) return MaybeHandle<Object>();
+    if (is_array.FromJust()) {
+      Handle<Object> length_object;
+      ASSIGN_RETURN_ON_EXCEPTION(
+          isolate_, length_object,
+          Object::GetLengthFromArrayLike(isolate_, object), Object);
+      double length = length_object->Number();
+      for (double i = 0; i < length; i++) {
+        HandleScope inner_scope(isolate_);
+        Handle<Object> index = isolate_->factory()->NewNumber(i);
+        Handle<String> name = isolate_->factory()->NumberToString(index);
+        if (!RecurseAndApply(object, name)) return MaybeHandle<Object>();
+      }
+    } else {
+      Handle<FixedArray> contents;
+      ASSIGN_RETURN_ON_EXCEPTION(
+          isolate_, contents,
+          KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly,
+                                  ENUMERABLE_STRINGS,
+                                  GetKeysConversion::kConvertToString),
+          Object);
+      for (int i = 0; i < contents->length(); i++) {
+        HandleScope inner_scope(isolate_);
+        Handle<String> name(String::cast(contents->get(i)), isolate_);
+        if (!RecurseAndApply(object, name)) return MaybeHandle<Object>();
+      }
+    }
+  }
+  Handle<Object> argv[] = {name, value};
+  Handle<Object> result;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate_, result, Execution::Call(isolate_, reviver_, holder, 2, argv),
+      Object);
+  return outer_scope.CloseAndEscape(result);
+}
+
+bool JsonParseInternalizer::RecurseAndApply(Handle<JSReceiver> holder,
+                                            Handle<String> name) {
+  Handle<Object> result;
+  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+      isolate_, result, InternalizeJsonProperty(holder, name), false);
+  Maybe<bool> change_result = Nothing<bool>();
+  if (result->IsUndefined(isolate_)) {
+    change_result = JSReceiver::DeletePropertyOrElement(holder, name, SLOPPY);
+  } else {
+    PropertyDescriptor desc;
+    desc.set_value(result);
+    desc.set_configurable(true);
+    desc.set_enumerable(true);
+    desc.set_writable(true);
+    change_result = JSReceiver::DefineOwnProperty(isolate_, holder, name, &desc,
+                                                  Object::DONT_THROW);
+  }
+  MAYBE_RETURN(change_result, false);
+  return true;
+}
+
+template <bool seq_one_byte>
+JsonParser<seq_one_byte>::JsonParser(Isolate* isolate, Handle<String> source)
+    : source_(source),
+      source_length_(source->length()),
+      isolate_(isolate),
+      factory_(isolate_->factory()),
+      zone_(isolate_->allocator()),
+      object_constructor_(isolate_->native_context()->object_function(),
+                          isolate_),
+      position_(-1) {
+  source_ = String::Flatten(source_);
+  pretenure_ = (source_length_ >= kPretenureTreshold) ? TENURED : NOT_TENURED;
+
+  // Optimized fast case where we only have Latin1 characters.
+  if (seq_one_byte) {
+    seq_source_ = Handle<SeqOneByteString>::cast(source_);
+  }
+}
+
+template <bool seq_one_byte>
+MaybeHandle<Object> JsonParser<seq_one_byte>::ParseJson() {
+  // Advance to the first character (possibly EOS)
+  AdvanceSkipWhitespace();
+  Handle<Object> result = ParseJsonValue();
+  if (result.is_null() || c0_ != kEndOfString) {
+    // Some exception (for example stack overflow) is already pending.
+    if (isolate_->has_pending_exception()) return Handle<Object>::null();
+
+    // Parse failed. Current character is the unexpected token.
+    Factory* factory = this->factory();
+    MessageTemplate::Template message;
+    Handle<Object> arg1 = Handle<Smi>(Smi::FromInt(position_), isolate());
+    Handle<Object> arg2;
+
+    switch (c0_) {
+      case kEndOfString:
+        message = MessageTemplate::kJsonParseUnexpectedEOS;
+        break;
+      case '-':
+      case '0':
+      case '1':
+      case '2':
+      case '3':
+      case '4':
+      case '5':
+      case '6':
+      case '7':
+      case '8':
+      case '9':
+        message = MessageTemplate::kJsonParseUnexpectedTokenNumber;
+        break;
+      case '"':
+        message = MessageTemplate::kJsonParseUnexpectedTokenString;
+        break;
+      default:
+        message = MessageTemplate::kJsonParseUnexpectedToken;
+        arg2 = arg1;
+        arg1 = factory->LookupSingleCharacterStringFromCode(c0_);
+        break;
+    }
+
+    Handle<Script> script(factory->NewScript(source_));
+    // We should sent compile error event because we compile JSON object in
+    // separated source file.
+    isolate()->debug()->OnCompileError(script);
+    MessageLocation location(script, position_, position_ + 1);
+    Handle<Object> error = factory->NewSyntaxError(message, arg1, arg2);
+    return isolate()->template Throw<Object>(error, &location);
+  }
+  return result;
+}
+
+MaybeHandle<Object> InternalizeJsonProperty(Handle<JSObject> holder,
+                                            Handle<String> key);
+
+template <bool seq_one_byte>
+void JsonParser<seq_one_byte>::Advance() {
+  position_++;
+  if (position_ >= source_length_) {
+    c0_ = kEndOfString;
+  } else if (seq_one_byte) {
+    c0_ = seq_source_->SeqOneByteStringGet(position_);
+  } else {
+    c0_ = source_->Get(position_);
+  }
+}
+
+template <bool seq_one_byte>
+void JsonParser<seq_one_byte>::AdvanceSkipWhitespace() {
+  do {
+    Advance();
+  } while (c0_ == ' ' || c0_ == '\t' || c0_ == '\n' || c0_ == '\r');
+}
+
+template <bool seq_one_byte>
+void JsonParser<seq_one_byte>::SkipWhitespace() {
+  while (c0_ == ' ' || c0_ == '\t' || c0_ == '\n' || c0_ == '\r') {
+    Advance();
+  }
+}
+
+template <bool seq_one_byte>
+uc32 JsonParser<seq_one_byte>::AdvanceGetChar() {
+  Advance();
+  return c0_;
+}
+
+template <bool seq_one_byte>
+bool JsonParser<seq_one_byte>::MatchSkipWhiteSpace(uc32 c) {
+  if (c0_ == c) {
+    AdvanceSkipWhitespace();
+    return true;
+  }
+  return false;
+}
+
+template <bool seq_one_byte>
+bool JsonParser<seq_one_byte>::ParseJsonString(Handle<String> expected) {
+  int length = expected->length();
+  if (source_->length() - position_ - 1 > length) {
+    DisallowHeapAllocation no_gc;
+    String::FlatContent content = expected->GetFlatContent();
+    if (content.IsOneByte()) {
+      DCHECK_EQ('"', c0_);
+      const uint8_t* input_chars = seq_source_->GetChars() + position_ + 1;
+      const uint8_t* expected_chars = content.ToOneByteVector().start();
+      for (int i = 0; i < length; i++) {
+        uint8_t c0 = input_chars[i];
+        if (c0 != expected_chars[i] || c0 == '"' || c0 < 0x20 || c0 == '\\') {
+          return false;
+        }
+      }
+      if (input_chars[length] == '"') {
+        position_ = position_ + length + 1;
+        AdvanceSkipWhitespace();
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+// Parse any JSON value.
+template <bool seq_one_byte>
+Handle<Object> JsonParser<seq_one_byte>::ParseJsonValue() {
+  StackLimitCheck stack_check(isolate_);
+  if (stack_check.HasOverflowed()) {
+    isolate_->StackOverflow();
+    return Handle<Object>::null();
+  }
+
+  if (stack_check.InterruptRequested() &&
+      isolate_->stack_guard()->HandleInterrupts()->IsException(isolate_)) {
+    return Handle<Object>::null();
+  }
+
+  if (c0_ == '"') return ParseJsonString();
+  if ((c0_ >= '0' && c0_ <= '9') || c0_ == '-') return ParseJsonNumber();
+  if (c0_ == '{') return ParseJsonObject();
+  if (c0_ == '[') return ParseJsonArray();
+  if (c0_ == 'f') {
+    if (AdvanceGetChar() == 'a' && AdvanceGetChar() == 'l' &&
+        AdvanceGetChar() == 's' && AdvanceGetChar() == 'e') {
+      AdvanceSkipWhitespace();
+      return factory()->false_value();
+    }
+    return ReportUnexpectedCharacter();
+  }
+  if (c0_ == 't') {
+    if (AdvanceGetChar() == 'r' && AdvanceGetChar() == 'u' &&
+        AdvanceGetChar() == 'e') {
+      AdvanceSkipWhitespace();
+      return factory()->true_value();
+    }
+    return ReportUnexpectedCharacter();
+  }
+  if (c0_ == 'n') {
+    if (AdvanceGetChar() == 'u' && AdvanceGetChar() == 'l' &&
+        AdvanceGetChar() == 'l') {
+      AdvanceSkipWhitespace();
+      return factory()->null_value();
+    }
+    return ReportUnexpectedCharacter();
+  }
+  return ReportUnexpectedCharacter();
+}
+
+template <bool seq_one_byte>
+ParseElementResult JsonParser<seq_one_byte>::ParseElement(
+    Handle<JSObject> json_object) {
+  uint32_t index = 0;
+  // Maybe an array index, try to parse it.
+  if (c0_ == '0') {
+    // With a leading zero, the string has to be "0" only to be an index.
+    Advance();
+  } else {
+    do {
+      int d = c0_ - '0';
+      if (index > 429496729U - ((d + 3) >> 3)) break;
+      index = (index * 10) + d;
+      Advance();
+    } while (IsDecimalDigit(c0_));
+  }
+
+  if (c0_ == '"') {
+    // Successfully parsed index, parse and store element.
+    AdvanceSkipWhitespace();
+
+    if (c0_ == ':') {
+      AdvanceSkipWhitespace();
+      Handle<Object> value = ParseJsonValue();
+      if (!value.is_null()) {
+        JSObject::SetOwnElementIgnoreAttributes(json_object, index, value, NONE)
+            .Assert();
+        return kElementFound;
+      } else {
+        return kNullHandle;
+      }
+    }
+  }
+  return kElementNotFound;
+}
+
+// Parse a JSON object. Position must be right at '{'.
+template <bool seq_one_byte>
+Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
+  HandleScope scope(isolate());
+  Handle<JSObject> json_object =
+      factory()->NewJSObject(object_constructor(), pretenure_);
+  Handle<Map> map(json_object->map());
+  int descriptor = 0;
+  ZoneList<Handle<Object> > properties(8, zone());
+  DCHECK_EQ(c0_, '{');
+
+  bool transitioning = true;
+
+  AdvanceSkipWhitespace();
+  if (c0_ != '}') {
+    do {
+      if (c0_ != '"') return ReportUnexpectedCharacter();
+
+      int start_position = position_;
+      Advance();
+
+      if (IsDecimalDigit(c0_)) {
+        ParseElementResult element_result = ParseElement(json_object);
+        if (element_result == kNullHandle) return Handle<Object>::null();
+        if (element_result == kElementFound) continue;
+      }
+      // Not an index, fallback to the slow path.
+
+      position_ = start_position;
+#ifdef DEBUG
+      c0_ = '"';
+#endif
+
+      Handle<String> key;
+      Handle<Object> value;
+
+      // Try to follow existing transitions as long as possible. Once we stop
+      // transitioning, no transition can be found anymore.
+      DCHECK(transitioning);
+      // First check whether there is a single expected transition. If so, try
+      // to parse it first.
+      bool follow_expected = false;
+      Handle<Map> target;
+      if (seq_one_byte) {
+        key = TransitionArray::ExpectedTransitionKey(map);
+        follow_expected = !key.is_null() && ParseJsonString(key);
+      }
+      // If the expected transition hits, follow it.
+      if (follow_expected) {
+        target = TransitionArray::ExpectedTransitionTarget(map);
+      } else {
+        // If the expected transition failed, parse an internalized string and
+        // try to find a matching transition.
+        key = ParseJsonInternalizedString();
+        if (key.is_null()) return ReportUnexpectedCharacter();
+
+        target = TransitionArray::FindTransitionToField(map, key);
+        // If a transition was found, follow it and continue.
+        transitioning = !target.is_null();
+      }
+      if (c0_ != ':') return ReportUnexpectedCharacter();
+
+      AdvanceSkipWhitespace();
+      value = ParseJsonValue();
+      if (value.is_null()) return ReportUnexpectedCharacter();
+
+      if (transitioning) {
+        PropertyDetails details =
+            target->instance_descriptors()->GetDetails(descriptor);
+        Representation expected_representation = details.representation();
+
+        if (value->FitsRepresentation(expected_representation)) {
+          if (expected_representation.IsHeapObject() &&
+              !target->instance_descriptors()
+                   ->GetFieldType(descriptor)
+                   ->NowContains(value)) {
+            Handle<FieldType> value_type(
+                value->OptimalType(isolate(), expected_representation));
+            Map::GeneralizeFieldType(target, descriptor,
+                                     expected_representation, value_type);
+          }
+          DCHECK(target->instance_descriptors()
+                     ->GetFieldType(descriptor)
+                     ->NowContains(value));
+          properties.Add(value, zone());
+          map = target;
+          descriptor++;
+          continue;
+        } else {
+          transitioning = false;
+        }
+      }
+
+      DCHECK(!transitioning);
+
+      // Commit the intermediate state to the object and stop transitioning.
+      CommitStateToJsonObject(json_object, map, &properties);
+
+      JSObject::DefinePropertyOrElementIgnoreAttributes(json_object, key, value)
+          .Check();
+    } while (transitioning && MatchSkipWhiteSpace(','));
+
+    // If we transitioned until the very end, transition the map now.
+    if (transitioning) {
+      CommitStateToJsonObject(json_object, map, &properties);
+    } else {
+      while (MatchSkipWhiteSpace(',')) {
+        HandleScope local_scope(isolate());
+        if (c0_ != '"') return ReportUnexpectedCharacter();
+
+        int start_position = position_;
+        Advance();
+
+        if (IsDecimalDigit(c0_)) {
+          ParseElementResult element_result = ParseElement(json_object);
+          if (element_result == kNullHandle) return Handle<Object>::null();
+          if (element_result == kElementFound) continue;
+        }
+        // Not an index, fallback to the slow path.
+
+        position_ = start_position;
+#ifdef DEBUG
+        c0_ = '"';
+#endif
+
+        Handle<String> key;
+        Handle<Object> value;
+
+        key = ParseJsonInternalizedString();
+        if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
+
+        AdvanceSkipWhitespace();
+        value = ParseJsonValue();
+        if (value.is_null()) return ReportUnexpectedCharacter();
+
+        JSObject::DefinePropertyOrElementIgnoreAttributes(json_object, key,
+                                                          value)
+            .Check();
+      }
+    }
+
+    if (c0_ != '}') {
+      return ReportUnexpectedCharacter();
+    }
+  }
+  AdvanceSkipWhitespace();
+  return scope.CloseAndEscape(json_object);
+}
+
+template <bool seq_one_byte>
+void JsonParser<seq_one_byte>::CommitStateToJsonObject(
+    Handle<JSObject> json_object, Handle<Map> map,
+    ZoneList<Handle<Object> >* properties) {
+  JSObject::AllocateStorageForMap(json_object, map);
+  DCHECK(!json_object->map()->is_dictionary_map());
+
+  DisallowHeapAllocation no_gc;
+
+  int length = properties->length();
+  for (int i = 0; i < length; i++) {
+    Handle<Object> value = (*properties)[i];
+    json_object->WriteToField(i, *value);
+  }
+}
+
+// Parse a JSON array. Position must be right at '['.
+template <bool seq_one_byte>
+Handle<Object> JsonParser<seq_one_byte>::ParseJsonArray() {
+  HandleScope scope(isolate());
+  ZoneList<Handle<Object> > elements(4, zone());
+  DCHECK_EQ(c0_, '[');
+
+  AdvanceSkipWhitespace();
+  if (c0_ != ']') {
+    do {
+      Handle<Object> element = ParseJsonValue();
+      if (element.is_null()) return ReportUnexpectedCharacter();
+      elements.Add(element, zone());
+    } while (MatchSkipWhiteSpace(','));
+    if (c0_ != ']') {
+      return ReportUnexpectedCharacter();
+    }
+  }
+  AdvanceSkipWhitespace();
+  // Allocate a fixed array with all the elements.
+  Handle<FixedArray> fast_elements =
+      factory()->NewFixedArray(elements.length(), pretenure_);
+  for (int i = 0, n = elements.length(); i < n; i++) {
+    fast_elements->set(i, *elements[i]);
+  }
+  Handle<Object> json_array = factory()->NewJSArrayWithElements(
+      fast_elements, FAST_ELEMENTS, pretenure_);
+  return scope.CloseAndEscape(json_array);
+}
+
+template <bool seq_one_byte>
+Handle<Object> JsonParser<seq_one_byte>::ParseJsonNumber() {
+  bool negative = false;
+  int beg_pos = position_;
+  if (c0_ == '-') {
+    Advance();
+    negative = true;
+  }
+  if (c0_ == '0') {
+    Advance();
+    // Prefix zero is only allowed if it's the only digit before
+    // a decimal point or exponent.
+    if (IsDecimalDigit(c0_)) return ReportUnexpectedCharacter();
+  } else {
+    int i = 0;
+    int digits = 0;
+    if (c0_ < '1' || c0_ > '9') return ReportUnexpectedCharacter();
+    do {
+      i = i * 10 + c0_ - '0';
+      digits++;
+      Advance();
+    } while (IsDecimalDigit(c0_));
+    if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
+      SkipWhitespace();
+      return Handle<Smi>(Smi::FromInt((negative ? -i : i)), isolate());
+    }
+  }
+  if (c0_ == '.') {
+    Advance();
+    if (!IsDecimalDigit(c0_)) return ReportUnexpectedCharacter();
+    do {
+      Advance();
+    } while (IsDecimalDigit(c0_));
+  }
+  if (AsciiAlphaToLower(c0_) == 'e') {
+    Advance();
+    if (c0_ == '-' || c0_ == '+') Advance();
+    if (!IsDecimalDigit(c0_)) return ReportUnexpectedCharacter();
+    do {
+      Advance();
+    } while (IsDecimalDigit(c0_));
+  }
+  int length = position_ - beg_pos;
+  double number;
+  if (seq_one_byte) {
+    Vector<const uint8_t> chars(seq_source_->GetChars() + beg_pos, length);
+    number = StringToDouble(isolate()->unicode_cache(), chars,
+                            NO_FLAGS,  // Hex, octal or trailing junk.
+                            std::numeric_limits<double>::quiet_NaN());
+  } else {
+    Vector<uint8_t> buffer = Vector<uint8_t>::New(length);
+    String::WriteToFlat(*source_, buffer.start(), beg_pos, position_);
+    Vector<const uint8_t> result =
+        Vector<const uint8_t>(buffer.start(), length);
+    number = StringToDouble(isolate()->unicode_cache(), result,
+                            NO_FLAGS,  // Hex, octal or trailing junk.
+                            0.0);
+    buffer.Dispose();
+  }
+  SkipWhitespace();
+  return factory()->NewNumber(number, pretenure_);
+}
+
+template <typename StringType>
+inline void SeqStringSet(Handle<StringType> seq_str, int i, uc32 c);
+
+template <>
+inline void SeqStringSet(Handle<SeqTwoByteString> seq_str, int i, uc32 c) {
+  seq_str->SeqTwoByteStringSet(i, c);
+}
+
+template <>
+inline void SeqStringSet(Handle<SeqOneByteString> seq_str, int i, uc32 c) {
+  seq_str->SeqOneByteStringSet(i, c);
+}
+
+template <typename StringType>
+inline Handle<StringType> NewRawString(Factory* factory, int length,
+                                       PretenureFlag pretenure);
+
+template <>
+inline Handle<SeqTwoByteString> NewRawString(Factory* factory, int length,
+                                             PretenureFlag pretenure) {
+  return factory->NewRawTwoByteString(length, pretenure).ToHandleChecked();
+}
+
+template <>
+inline Handle<SeqOneByteString> NewRawString(Factory* factory, int length,
+                                             PretenureFlag pretenure) {
+  return factory->NewRawOneByteString(length, pretenure).ToHandleChecked();
+}
+
+// Scans the rest of a JSON string starting from position_ and writes
+// prefix[start..end] along with the scanned characters into a
+// sequential string of type StringType.
+template <bool seq_one_byte>
+template <typename StringType, typename SinkChar>
+Handle<String> JsonParser<seq_one_byte>::SlowScanJsonString(
+    Handle<String> prefix, int start, int end) {
+  int count = end - start;
+  int max_length = count + source_length_ - position_;
+  int length = Min(max_length, Max(kInitialSpecialStringLength, 2 * count));
+  Handle<StringType> seq_string =
+      NewRawString<StringType>(factory(), length, pretenure_);
+  // Copy prefix into seq_str.
+  SinkChar* dest = seq_string->GetChars();
+  String::WriteToFlat(*prefix, dest, start, end);
+
+  while (c0_ != '"') {
+    // Check for control character (0x00-0x1f) or unterminated string (<0).
+    if (c0_ < 0x20) return Handle<String>::null();
+    if (count >= length) {
+      // We need to create a longer sequential string for the result.
+      return SlowScanJsonString<StringType, SinkChar>(seq_string, 0, count);
+    }
+    if (c0_ != '\\') {
+      // If the sink can contain UC16 characters, or source_ contains only
+      // Latin1 characters, there's no need to test whether we can store the
+      // character. Otherwise check whether the UC16 source character can fit
+      // in the Latin1 sink.
+      if (sizeof(SinkChar) == kUC16Size || seq_one_byte ||
+          c0_ <= String::kMaxOneByteCharCode) {
+        SeqStringSet(seq_string, count++, c0_);
+        Advance();
+      } else {
+        // StringType is SeqOneByteString and we just read a non-Latin1 char.
+        return SlowScanJsonString<SeqTwoByteString, uc16>(seq_string, 0, count);
+      }
+    } else {
+      Advance();  // Advance past the \.
+      switch (c0_) {
+        case '"':
+        case '\\':
+        case '/':
+          SeqStringSet(seq_string, count++, c0_);
+          break;
+        case 'b':
+          SeqStringSet(seq_string, count++, '\x08');
+          break;
+        case 'f':
+          SeqStringSet(seq_string, count++, '\x0c');
+          break;
+        case 'n':
+          SeqStringSet(seq_string, count++, '\x0a');
+          break;
+        case 'r':
+          SeqStringSet(seq_string, count++, '\x0d');
+          break;
+        case 't':
+          SeqStringSet(seq_string, count++, '\x09');
+          break;
+        case 'u': {
+          uc32 value = 0;
+          for (int i = 0; i < 4; i++) {
+            Advance();
+            int digit = HexValue(c0_);
+            if (digit < 0) {
+              return Handle<String>::null();
+            }
+            value = value * 16 + digit;
+          }
+          if (sizeof(SinkChar) == kUC16Size ||
+              value <= String::kMaxOneByteCharCode) {
+            SeqStringSet(seq_string, count++, value);
+            break;
+          } else {
+            // StringType is SeqOneByteString and we just read a non-Latin1
+            // char.
+            position_ -= 6;  // Rewind position_ to \ in \uxxxx.
+            Advance();
+            return SlowScanJsonString<SeqTwoByteString, uc16>(seq_string, 0,
+                                                              count);
+          }
+        }
+        default:
+          return Handle<String>::null();
+      }
+      Advance();
+    }
+  }
+
+  DCHECK_EQ('"', c0_);
+  // Advance past the last '"'.
+  AdvanceSkipWhitespace();
+
+  // Shrink seq_string length to count and return.
+  return SeqString::Truncate(seq_string, count);
+}
+
+template <bool seq_one_byte>
+template <bool is_internalized>
+Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
+  DCHECK_EQ('"', c0_);
+  Advance();
+  if (c0_ == '"') {
+    AdvanceSkipWhitespace();
+    return factory()->empty_string();
+  }
+
+  if (seq_one_byte && is_internalized) {
+    // Fast path for existing internalized strings.  If the the string being
+    // parsed is not a known internalized string, contains backslashes or
+    // unexpectedly reaches the end of string, return with an empty handle.
+    uint32_t running_hash = isolate()->heap()->HashSeed();
+    int position = position_;
+    uc32 c0 = c0_;
+    do {
+      if (c0 == '\\') {
+        c0_ = c0;
+        int beg_pos = position_;
+        position_ = position;
+        return SlowScanJsonString<SeqOneByteString, uint8_t>(source_, beg_pos,
+                                                             position_);
+      }
+      if (c0 < 0x20) return Handle<String>::null();
+      running_hash = StringHasher::AddCharacterCore(running_hash,
+                                                    static_cast<uint16_t>(c0));
+      position++;
+      if (position >= source_length_) return Handle<String>::null();
+      c0 = seq_source_->SeqOneByteStringGet(position);
+    } while (c0 != '"');
+    int length = position - position_;
+    uint32_t hash = (length <= String::kMaxHashCalcLength)
+                        ? StringHasher::GetHashCore(running_hash)
+                        : static_cast<uint32_t>(length);
+    Vector<const uint8_t> string_vector(seq_source_->GetChars() + position_,
+                                        length);
+    StringTable* string_table = isolate()->heap()->string_table();
+    uint32_t capacity = string_table->Capacity();
+    uint32_t entry = StringTable::FirstProbe(hash, capacity);
+    uint32_t count = 1;
+    Handle<String> result;
+    while (true) {
+      Object* element = string_table->KeyAt(entry);
+      if (element->IsUndefined(isolate())) {
+        // Lookup failure.
+        result =
+            factory()->InternalizeOneByteString(seq_source_, position_, length);
+        break;
+      }
+      if (!element->IsTheHole(isolate()) &&
+          String::cast(element)->IsOneByteEqualTo(string_vector)) {
+        result = Handle<String>(String::cast(element), isolate());
+#ifdef DEBUG
+        uint32_t hash_field =
+            (hash << String::kHashShift) | String::kIsNotArrayIndexMask;
+        DCHECK_EQ(static_cast<int>(result->Hash()),
+                  static_cast<int>(hash_field >> String::kHashShift));
+#endif
+        break;
+      }
+      entry = StringTable::NextProbe(entry, count++, capacity);
+    }
+    position_ = position;
+    // Advance past the last '"'.
+    AdvanceSkipWhitespace();
+    return result;
+  }
+
+  int beg_pos = position_;
+  // Fast case for Latin1 only without escape characters.
+  do {
+    // Check for control character (0x00-0x1f) or unterminated string (<0).
+    if (c0_ < 0x20) return Handle<String>::null();
+    if (c0_ != '\\') {
+      if (seq_one_byte || c0_ <= String::kMaxOneByteCharCode) {
+        Advance();
+      } else {
+        return SlowScanJsonString<SeqTwoByteString, uc16>(source_, beg_pos,
+                                                          position_);
+      }
+    } else {
+      return SlowScanJsonString<SeqOneByteString, uint8_t>(source_, beg_pos,
+                                                           position_);
+    }
+  } while (c0_ != '"');
+  int length = position_ - beg_pos;
+  Handle<String> result =
+      factory()->NewRawOneByteString(length, pretenure_).ToHandleChecked();
+  uint8_t* dest = SeqOneByteString::cast(*result)->GetChars();
+  String::WriteToFlat(*source_, dest, beg_pos, position_);
+
+  DCHECK_EQ('"', c0_);
+  // Advance past the last '"'.
+  AdvanceSkipWhitespace();
+  return result;
+}
+
+// Explicit instantiation.
+template class JsonParser<true>;
+template class JsonParser<false>;
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/json-parser.h b/src/json-parser.h
index 1b9829f..2d08fef 100644
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -5,95 +5,70 @@
 #ifndef V8_JSON_PARSER_H_
 #define V8_JSON_PARSER_H_
 
-#include "src/char-predicates.h"
-#include "src/conversions.h"
-#include "src/debug/debug.h"
 #include "src/factory.h"
-#include "src/field-type.h"
-#include "src/messages.h"
-#include "src/parsing/scanner.h"
-#include "src/parsing/token.h"
-#include "src/transitions.h"
+#include "src/objects.h"
 
 namespace v8 {
 namespace internal {
 
 enum ParseElementResult { kElementFound, kElementNotFound, kNullHandle };
 
+class JsonParseInternalizer BASE_EMBEDDED {
+ public:
+  static MaybeHandle<Object> Internalize(Isolate* isolate,
+                                         Handle<Object> object,
+                                         Handle<Object> reviver);
+
+ private:
+  JsonParseInternalizer(Isolate* isolate, Handle<JSReceiver> reviver)
+      : isolate_(isolate), reviver_(reviver) {}
+
+  MaybeHandle<Object> InternalizeJsonProperty(Handle<JSReceiver> holder,
+                                              Handle<String> key);
+
+  bool RecurseAndApply(Handle<JSReceiver> holder, Handle<String> name);
+
+  Isolate* isolate_;
+  Handle<JSReceiver> reviver_;
+};
 
 // A simple json parser.
 template <bool seq_one_byte>
 class JsonParser BASE_EMBEDDED {
  public:
-  MUST_USE_RESULT static MaybeHandle<Object> Parse(Handle<String> source) {
-    return JsonParser(source).ParseJson();
+  MUST_USE_RESULT static MaybeHandle<Object> Parse(Isolate* isolate,
+                                                   Handle<String> source,
+                                                   Handle<Object> reviver) {
+    Handle<Object> result;
+    ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+                               JsonParser(isolate, source).ParseJson(), Object);
+    if (reviver->IsCallable()) {
+      return JsonParseInternalizer::Internalize(isolate, result, reviver);
+    }
+    return result;
   }
 
   static const int kEndOfString = -1;
 
  private:
-  explicit JsonParser(Handle<String> source)
-      : source_(source),
-        source_length_(source->length()),
-        isolate_(source->map()->GetHeap()->isolate()),
-        factory_(isolate_->factory()),
-        zone_(isolate_->allocator()),
-        object_constructor_(isolate_->native_context()->object_function(),
-                            isolate_),
-        position_(-1) {
-    source_ = String::Flatten(source_);
-    pretenure_ = (source_length_ >= kPretenureTreshold) ? TENURED : NOT_TENURED;
-
-    // Optimized fast case where we only have Latin1 characters.
-    if (seq_one_byte) {
-      seq_source_ = Handle<SeqOneByteString>::cast(source_);
-    }
-  }
+  JsonParser(Isolate* isolate, Handle<String> source);
 
   // Parse a string containing a single JSON value.
   MaybeHandle<Object> ParseJson();
 
-  inline void Advance() {
-    position_++;
-    if (position_ >= source_length_) {
-      c0_ = kEndOfString;
-    } else if (seq_one_byte) {
-      c0_ = seq_source_->SeqOneByteStringGet(position_);
-    } else {
-      c0_ = source_->Get(position_);
-    }
-  }
+  INLINE(void Advance());
 
   // The JSON lexical grammar is specified in the ECMAScript 5 standard,
   // section 15.12.1.1. The only allowed whitespace characters between tokens
   // are tab, carriage-return, newline and space.
 
-  inline void AdvanceSkipWhitespace() {
-    do {
-      Advance();
-    } while (c0_ == ' ' || c0_ == '\t' || c0_ == '\n' || c0_ == '\r');
-  }
-
-  inline void SkipWhitespace() {
-    while (c0_ == ' ' || c0_ == '\t' || c0_ == '\n' || c0_ == '\r') {
-      Advance();
-    }
-  }
-
-  inline uc32 AdvanceGetChar() {
-    Advance();
-    return c0_;
-  }
+  INLINE(void AdvanceSkipWhitespace());
+  INLINE(void SkipWhitespace());
+  INLINE(uc32 AdvanceGetChar());
 
   // Checks that current charater is c.
   // If so, then consume c and skip whitespace.
-  inline bool MatchSkipWhiteSpace(uc32 c) {
-    if (c0_ == c) {
-      AdvanceSkipWhitespace();
-      return true;
-    }
-    return false;
-  }
+  INLINE(bool MatchSkipWhiteSpace(uc32 c));
 
   // A JSON string (production JSONString) is subset of valid JavaScript string
   // literals. The string must only be double-quoted (not single-quoted), and
@@ -103,30 +78,7 @@
     return ScanJsonString<false>();
   }
 
-  bool ParseJsonString(Handle<String> expected) {
-    int length = expected->length();
-    if (source_->length() - position_ - 1 > length) {
-      DisallowHeapAllocation no_gc;
-      String::FlatContent content = expected->GetFlatContent();
-      if (content.IsOneByte()) {
-        DCHECK_EQ('"', c0_);
-        const uint8_t* input_chars = seq_source_->GetChars() + position_ + 1;
-        const uint8_t* expected_chars = content.ToOneByteVector().start();
-        for (int i = 0; i < length; i++) {
-          uint8_t c0 = input_chars[i];
-          if (c0 != expected_chars[i] || c0 == '"' || c0 < 0x20 || c0 == '\\') {
-            return false;
-          }
-        }
-        if (input_chars[length] == '"') {
-          position_ = position_ + length + 1;
-          AdvanceSkipWhitespace();
-          return true;
-        }
-      }
-    }
-    return false;
-  }
+  bool ParseJsonString(Handle<String> expected);
 
   Handle<String> ParseJsonInternalizedString() {
     Handle<String> result = ScanJsonString<true>();
@@ -188,7 +140,6 @@
   static const int kInitialSpecialStringLength = 32;
   static const int kPretenureTreshold = 100 * 1024;
 
-
  private:
   Zone* zone() { return &zone_; }
 
@@ -208,639 +159,6 @@
   int position_;
 };
 
-template <bool seq_one_byte>
-MaybeHandle<Object> JsonParser<seq_one_byte>::ParseJson() {
-  // Advance to the first character (possibly EOS)
-  AdvanceSkipWhitespace();
-  Handle<Object> result = ParseJsonValue();
-  if (result.is_null() || c0_ != kEndOfString) {
-    // Some exception (for example stack overflow) is already pending.
-    if (isolate_->has_pending_exception()) return Handle<Object>::null();
-
-    // Parse failed. Current character is the unexpected token.
-    Factory* factory = this->factory();
-    MessageTemplate::Template message;
-    Handle<Object> arg1 = Handle<Smi>(Smi::FromInt(position_), isolate());
-    Handle<Object> arg2;
-
-    switch (c0_) {
-      case kEndOfString:
-        message = MessageTemplate::kJsonParseUnexpectedEOS;
-        break;
-      case '-':
-      case '0':
-      case '1':
-      case '2':
-      case '3':
-      case '4':
-      case '5':
-      case '6':
-      case '7':
-      case '8':
-      case '9':
-        message = MessageTemplate::kJsonParseUnexpectedTokenNumber;
-        break;
-      case '"':
-        message = MessageTemplate::kJsonParseUnexpectedTokenString;
-        break;
-      default:
-        message = MessageTemplate::kJsonParseUnexpectedToken;
-        arg2 = arg1;
-        arg1 = factory->LookupSingleCharacterStringFromCode(c0_);
-        break;
-    }
-
-    Handle<Script> script(factory->NewScript(source_));
-    // We should sent compile error event because we compile JSON object in
-    // separated source file.
-    isolate()->debug()->OnCompileError(script);
-    MessageLocation location(script, position_, position_ + 1);
-    Handle<Object> error = factory->NewSyntaxError(message, arg1, arg2);
-    return isolate()->template Throw<Object>(error, &location);
-  }
-  return result;
-}
-
-
-// Parse any JSON value.
-template <bool seq_one_byte>
-Handle<Object> JsonParser<seq_one_byte>::ParseJsonValue() {
-  StackLimitCheck stack_check(isolate_);
-  if (stack_check.HasOverflowed()) {
-    isolate_->StackOverflow();
-    return Handle<Object>::null();
-  }
-
-  if (stack_check.InterruptRequested()) {
-    ExecutionAccess access(isolate_);
-    // Avoid blocking GC in long running parser (v8:3974).
-    isolate_->stack_guard()->HandleGCInterrupt();
-  }
-
-  if (c0_ == '"') return ParseJsonString();
-  if ((c0_ >= '0' && c0_ <= '9') || c0_ == '-') return ParseJsonNumber();
-  if (c0_ == '{') return ParseJsonObject();
-  if (c0_ == '[') return ParseJsonArray();
-  if (c0_ == 'f') {
-    if (AdvanceGetChar() == 'a' && AdvanceGetChar() == 'l' &&
-        AdvanceGetChar() == 's' && AdvanceGetChar() == 'e') {
-      AdvanceSkipWhitespace();
-      return factory()->false_value();
-    }
-    return ReportUnexpectedCharacter();
-  }
-  if (c0_ == 't') {
-    if (AdvanceGetChar() == 'r' && AdvanceGetChar() == 'u' &&
-        AdvanceGetChar() == 'e') {
-      AdvanceSkipWhitespace();
-      return factory()->true_value();
-    }
-    return ReportUnexpectedCharacter();
-  }
-  if (c0_ == 'n') {
-    if (AdvanceGetChar() == 'u' && AdvanceGetChar() == 'l' &&
-        AdvanceGetChar() == 'l') {
-      AdvanceSkipWhitespace();
-      return factory()->null_value();
-    }
-    return ReportUnexpectedCharacter();
-  }
-  return ReportUnexpectedCharacter();
-}
-
-
-template <bool seq_one_byte>
-ParseElementResult JsonParser<seq_one_byte>::ParseElement(
-    Handle<JSObject> json_object) {
-  uint32_t index = 0;
-  // Maybe an array index, try to parse it.
-  if (c0_ == '0') {
-    // With a leading zero, the string has to be "0" only to be an index.
-    Advance();
-  } else {
-    do {
-      int d = c0_ - '0';
-      if (index > 429496729U - ((d + 3) >> 3)) break;
-      index = (index * 10) + d;
-      Advance();
-    } while (IsDecimalDigit(c0_));
-  }
-
-  if (c0_ == '"') {
-    // Successfully parsed index, parse and store element.
-    AdvanceSkipWhitespace();
-
-    if (c0_ == ':') {
-      AdvanceSkipWhitespace();
-      Handle<Object> value = ParseJsonValue();
-      if (!value.is_null()) {
-        JSObject::SetOwnElementIgnoreAttributes(json_object, index, value, NONE)
-            .Assert();
-        return kElementFound;
-      } else {
-        return kNullHandle;
-      }
-    }
-  }
-  return kElementNotFound;
-}
-
-// Parse a JSON object. Position must be right at '{'.
-template <bool seq_one_byte>
-Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
-  HandleScope scope(isolate());
-  Handle<JSObject> json_object =
-      factory()->NewJSObject(object_constructor(), pretenure_);
-  Handle<Map> map(json_object->map());
-  int descriptor = 0;
-  ZoneList<Handle<Object> > properties(8, zone());
-  DCHECK_EQ(c0_, '{');
-
-  bool transitioning = true;
-
-  AdvanceSkipWhitespace();
-  if (c0_ != '}') {
-    do {
-      if (c0_ != '"') return ReportUnexpectedCharacter();
-
-      int start_position = position_;
-      Advance();
-
-      if (IsDecimalDigit(c0_)) {
-        ParseElementResult element_result = ParseElement(json_object);
-        if (element_result == kNullHandle) return Handle<Object>::null();
-        if (element_result == kElementFound) continue;
-      }
-      // Not an index, fallback to the slow path.
-
-      position_ = start_position;
-#ifdef DEBUG
-      c0_ = '"';
-#endif
-
-      Handle<String> key;
-      Handle<Object> value;
-
-      // Try to follow existing transitions as long as possible. Once we stop
-      // transitioning, no transition can be found anymore.
-      DCHECK(transitioning);
-      // First check whether there is a single expected transition. If so, try
-      // to parse it first.
-      bool follow_expected = false;
-      Handle<Map> target;
-      if (seq_one_byte) {
-        key = TransitionArray::ExpectedTransitionKey(map);
-        follow_expected = !key.is_null() && ParseJsonString(key);
-      }
-      // If the expected transition hits, follow it.
-      if (follow_expected) {
-        target = TransitionArray::ExpectedTransitionTarget(map);
-      } else {
-        // If the expected transition failed, parse an internalized string and
-        // try to find a matching transition.
-        key = ParseJsonInternalizedString();
-        if (key.is_null()) return ReportUnexpectedCharacter();
-
-        target = TransitionArray::FindTransitionToField(map, key);
-        // If a transition was found, follow it and continue.
-        transitioning = !target.is_null();
-      }
-      if (c0_ != ':') return ReportUnexpectedCharacter();
-
-      AdvanceSkipWhitespace();
-      value = ParseJsonValue();
-      if (value.is_null()) return ReportUnexpectedCharacter();
-
-      if (transitioning) {
-        PropertyDetails details =
-            target->instance_descriptors()->GetDetails(descriptor);
-        Representation expected_representation = details.representation();
-
-        if (value->FitsRepresentation(expected_representation)) {
-          if (expected_representation.IsHeapObject() &&
-              !target->instance_descriptors()
-                   ->GetFieldType(descriptor)
-                   ->NowContains(value)) {
-            Handle<FieldType> value_type(
-                value->OptimalType(isolate(), expected_representation));
-            Map::GeneralizeFieldType(target, descriptor,
-                                     expected_representation, value_type);
-          }
-          DCHECK(target->instance_descriptors()
-                     ->GetFieldType(descriptor)
-                     ->NowContains(value));
-          properties.Add(value, zone());
-          map = target;
-          descriptor++;
-          continue;
-        } else {
-          transitioning = false;
-        }
-      }
-
-      DCHECK(!transitioning);
-
-      // Commit the intermediate state to the object and stop transitioning.
-      CommitStateToJsonObject(json_object, map, &properties);
-
-      JSObject::DefinePropertyOrElementIgnoreAttributes(json_object, key, value)
-          .Check();
-    } while (transitioning && MatchSkipWhiteSpace(','));
-
-    // If we transitioned until the very end, transition the map now.
-    if (transitioning) {
-      CommitStateToJsonObject(json_object, map, &properties);
-    } else {
-      while (MatchSkipWhiteSpace(',')) {
-        HandleScope local_scope(isolate());
-        if (c0_ != '"') return ReportUnexpectedCharacter();
-
-        int start_position = position_;
-        Advance();
-
-        if (IsDecimalDigit(c0_)) {
-          ParseElementResult element_result = ParseElement(json_object);
-          if (element_result == kNullHandle) return Handle<Object>::null();
-          if (element_result == kElementFound) continue;
-        }
-        // Not an index, fallback to the slow path.
-
-        position_ = start_position;
-#ifdef DEBUG
-        c0_ = '"';
-#endif
-
-        Handle<String> key;
-        Handle<Object> value;
-
-        key = ParseJsonInternalizedString();
-        if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
-
-        AdvanceSkipWhitespace();
-        value = ParseJsonValue();
-        if (value.is_null()) return ReportUnexpectedCharacter();
-
-        JSObject::DefinePropertyOrElementIgnoreAttributes(json_object, key,
-                                                          value).Check();
-      }
-    }
-
-    if (c0_ != '}') {
-      return ReportUnexpectedCharacter();
-    }
-  }
-  AdvanceSkipWhitespace();
-  return scope.CloseAndEscape(json_object);
-}
-
-
-template <bool seq_one_byte>
-void JsonParser<seq_one_byte>::CommitStateToJsonObject(
-    Handle<JSObject> json_object, Handle<Map> map,
-    ZoneList<Handle<Object> >* properties) {
-  JSObject::AllocateStorageForMap(json_object, map);
-  DCHECK(!json_object->map()->is_dictionary_map());
-
-  DisallowHeapAllocation no_gc;
-
-  int length = properties->length();
-  for (int i = 0; i < length; i++) {
-    Handle<Object> value = (*properties)[i];
-    json_object->WriteToField(i, *value);
-  }
-}
-
-
-// Parse a JSON array. Position must be right at '['.
-template <bool seq_one_byte>
-Handle<Object> JsonParser<seq_one_byte>::ParseJsonArray() {
-  HandleScope scope(isolate());
-  ZoneList<Handle<Object> > elements(4, zone());
-  DCHECK_EQ(c0_, '[');
-
-  AdvanceSkipWhitespace();
-  if (c0_ != ']') {
-    do {
-      Handle<Object> element = ParseJsonValue();
-      if (element.is_null()) return ReportUnexpectedCharacter();
-      elements.Add(element, zone());
-    } while (MatchSkipWhiteSpace(','));
-    if (c0_ != ']') {
-      return ReportUnexpectedCharacter();
-    }
-  }
-  AdvanceSkipWhitespace();
-  // Allocate a fixed array with all the elements.
-  Handle<FixedArray> fast_elements =
-      factory()->NewFixedArray(elements.length(), pretenure_);
-  for (int i = 0, n = elements.length(); i < n; i++) {
-    fast_elements->set(i, *elements[i]);
-  }
-  Handle<Object> json_array = factory()->NewJSArrayWithElements(
-      fast_elements, FAST_ELEMENTS, pretenure_);
-  return scope.CloseAndEscape(json_array);
-}
-
-
-template <bool seq_one_byte>
-Handle<Object> JsonParser<seq_one_byte>::ParseJsonNumber() {
-  bool negative = false;
-  int beg_pos = position_;
-  if (c0_ == '-') {
-    Advance();
-    negative = true;
-  }
-  if (c0_ == '0') {
-    Advance();
-    // Prefix zero is only allowed if it's the only digit before
-    // a decimal point or exponent.
-    if (IsDecimalDigit(c0_)) return ReportUnexpectedCharacter();
-  } else {
-    int i = 0;
-    int digits = 0;
-    if (c0_ < '1' || c0_ > '9') return ReportUnexpectedCharacter();
-    do {
-      i = i * 10 + c0_ - '0';
-      digits++;
-      Advance();
-    } while (IsDecimalDigit(c0_));
-    if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
-      SkipWhitespace();
-      return Handle<Smi>(Smi::FromInt((negative ? -i : i)), isolate());
-    }
-  }
-  if (c0_ == '.') {
-    Advance();
-    if (!IsDecimalDigit(c0_)) return ReportUnexpectedCharacter();
-    do {
-      Advance();
-    } while (IsDecimalDigit(c0_));
-  }
-  if (AsciiAlphaToLower(c0_) == 'e') {
-    Advance();
-    if (c0_ == '-' || c0_ == '+') Advance();
-    if (!IsDecimalDigit(c0_)) return ReportUnexpectedCharacter();
-    do {
-      Advance();
-    } while (IsDecimalDigit(c0_));
-  }
-  int length = position_ - beg_pos;
-  double number;
-  if (seq_one_byte) {
-    Vector<const uint8_t> chars(seq_source_->GetChars() +  beg_pos, length);
-    number = StringToDouble(isolate()->unicode_cache(), chars,
-                            NO_FLAGS,  // Hex, octal or trailing junk.
-                            std::numeric_limits<double>::quiet_NaN());
-  } else {
-    Vector<uint8_t> buffer = Vector<uint8_t>::New(length);
-    String::WriteToFlat(*source_, buffer.start(), beg_pos, position_);
-    Vector<const uint8_t> result =
-        Vector<const uint8_t>(buffer.start(), length);
-    number = StringToDouble(isolate()->unicode_cache(),
-                            result,
-                            NO_FLAGS,  // Hex, octal or trailing junk.
-                            0.0);
-    buffer.Dispose();
-  }
-  SkipWhitespace();
-  return factory()->NewNumber(number, pretenure_);
-}
-
-
-template <typename StringType>
-inline void SeqStringSet(Handle<StringType> seq_str, int i, uc32 c);
-
-template <>
-inline void SeqStringSet(Handle<SeqTwoByteString> seq_str, int i, uc32 c) {
-  seq_str->SeqTwoByteStringSet(i, c);
-}
-
-template <>
-inline void SeqStringSet(Handle<SeqOneByteString> seq_str, int i, uc32 c) {
-  seq_str->SeqOneByteStringSet(i, c);
-}
-
-template <typename StringType>
-inline Handle<StringType> NewRawString(Factory* factory,
-                                       int length,
-                                       PretenureFlag pretenure);
-
-template <>
-inline Handle<SeqTwoByteString> NewRawString(Factory* factory,
-                                             int length,
-                                             PretenureFlag pretenure) {
-  return factory->NewRawTwoByteString(length, pretenure).ToHandleChecked();
-}
-
-template <>
-inline Handle<SeqOneByteString> NewRawString(Factory* factory,
-                                           int length,
-                                           PretenureFlag pretenure) {
-  return factory->NewRawOneByteString(length, pretenure).ToHandleChecked();
-}
-
-
-// Scans the rest of a JSON string starting from position_ and writes
-// prefix[start..end] along with the scanned characters into a
-// sequential string of type StringType.
-template <bool seq_one_byte>
-template <typename StringType, typename SinkChar>
-Handle<String> JsonParser<seq_one_byte>::SlowScanJsonString(
-    Handle<String> prefix, int start, int end) {
-  int count = end - start;
-  int max_length = count + source_length_ - position_;
-  int length = Min(max_length, Max(kInitialSpecialStringLength, 2 * count));
-  Handle<StringType> seq_string =
-      NewRawString<StringType>(factory(), length, pretenure_);
-  // Copy prefix into seq_str.
-  SinkChar* dest = seq_string->GetChars();
-  String::WriteToFlat(*prefix, dest, start, end);
-
-  while (c0_ != '"') {
-    // Check for control character (0x00-0x1f) or unterminated string (<0).
-    if (c0_ < 0x20) return Handle<String>::null();
-    if (count >= length) {
-      // We need to create a longer sequential string for the result.
-      return SlowScanJsonString<StringType, SinkChar>(seq_string, 0, count);
-    }
-    if (c0_ != '\\') {
-      // If the sink can contain UC16 characters, or source_ contains only
-      // Latin1 characters, there's no need to test whether we can store the
-      // character. Otherwise check whether the UC16 source character can fit
-      // in the Latin1 sink.
-      if (sizeof(SinkChar) == kUC16Size || seq_one_byte ||
-          c0_ <= String::kMaxOneByteCharCode) {
-        SeqStringSet(seq_string, count++, c0_);
-        Advance();
-      } else {
-        // StringType is SeqOneByteString and we just read a non-Latin1 char.
-        return SlowScanJsonString<SeqTwoByteString, uc16>(seq_string, 0, count);
-      }
-    } else {
-      Advance();  // Advance past the \.
-      switch (c0_) {
-        case '"':
-        case '\\':
-        case '/':
-          SeqStringSet(seq_string, count++, c0_);
-          break;
-        case 'b':
-          SeqStringSet(seq_string, count++, '\x08');
-          break;
-        case 'f':
-          SeqStringSet(seq_string, count++, '\x0c');
-          break;
-        case 'n':
-          SeqStringSet(seq_string, count++, '\x0a');
-          break;
-        case 'r':
-          SeqStringSet(seq_string, count++, '\x0d');
-          break;
-        case 't':
-          SeqStringSet(seq_string, count++, '\x09');
-          break;
-        case 'u': {
-          uc32 value = 0;
-          for (int i = 0; i < 4; i++) {
-            Advance();
-            int digit = HexValue(c0_);
-            if (digit < 0) {
-              return Handle<String>::null();
-            }
-            value = value * 16 + digit;
-          }
-          if (sizeof(SinkChar) == kUC16Size ||
-              value <= String::kMaxOneByteCharCode) {
-            SeqStringSet(seq_string, count++, value);
-            break;
-          } else {
-            // StringType is SeqOneByteString and we just read a non-Latin1
-            // char.
-            position_ -= 6;  // Rewind position_ to \ in \uxxxx.
-            Advance();
-            return SlowScanJsonString<SeqTwoByteString, uc16>(seq_string,
-                                                              0,
-                                                              count);
-          }
-        }
-        default:
-          return Handle<String>::null();
-      }
-      Advance();
-    }
-  }
-
-  DCHECK_EQ('"', c0_);
-  // Advance past the last '"'.
-  AdvanceSkipWhitespace();
-
-  // Shrink seq_string length to count and return.
-  return SeqString::Truncate(seq_string, count);
-}
-
-
-template <bool seq_one_byte>
-template <bool is_internalized>
-Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
-  DCHECK_EQ('"', c0_);
-  Advance();
-  if (c0_ == '"') {
-    AdvanceSkipWhitespace();
-    return factory()->empty_string();
-  }
-
-  if (seq_one_byte && is_internalized) {
-    // Fast path for existing internalized strings.  If the the string being
-    // parsed is not a known internalized string, contains backslashes or
-    // unexpectedly reaches the end of string, return with an empty handle.
-    uint32_t running_hash = isolate()->heap()->HashSeed();
-    int position = position_;
-    uc32 c0 = c0_;
-    do {
-      if (c0 == '\\') {
-        c0_ = c0;
-        int beg_pos = position_;
-        position_ = position;
-        return SlowScanJsonString<SeqOneByteString, uint8_t>(source_,
-                                                             beg_pos,
-                                                             position_);
-      }
-      if (c0 < 0x20) return Handle<String>::null();
-      running_hash = StringHasher::AddCharacterCore(running_hash,
-                                                    static_cast<uint16_t>(c0));
-      position++;
-      if (position >= source_length_) return Handle<String>::null();
-      c0 = seq_source_->SeqOneByteStringGet(position);
-    } while (c0 != '"');
-    int length = position - position_;
-    uint32_t hash = (length <= String::kMaxHashCalcLength)
-                        ? StringHasher::GetHashCore(running_hash)
-                        : static_cast<uint32_t>(length);
-    Vector<const uint8_t> string_vector(
-        seq_source_->GetChars() + position_, length);
-    StringTable* string_table = isolate()->heap()->string_table();
-    uint32_t capacity = string_table->Capacity();
-    uint32_t entry = StringTable::FirstProbe(hash, capacity);
-    uint32_t count = 1;
-    Handle<String> result;
-    while (true) {
-      Object* element = string_table->KeyAt(entry);
-      if (element == isolate()->heap()->undefined_value()) {
-        // Lookup failure.
-        result = factory()->InternalizeOneByteString(
-            seq_source_, position_, length);
-        break;
-      }
-      if (element != isolate()->heap()->the_hole_value() &&
-          String::cast(element)->IsOneByteEqualTo(string_vector)) {
-        result = Handle<String>(String::cast(element), isolate());
-#ifdef DEBUG
-        uint32_t hash_field =
-            (hash << String::kHashShift) | String::kIsNotArrayIndexMask;
-        DCHECK_EQ(static_cast<int>(result->Hash()),
-                  static_cast<int>(hash_field >> String::kHashShift));
-#endif
-        break;
-      }
-      entry = StringTable::NextProbe(entry, count++, capacity);
-    }
-    position_ = position;
-    // Advance past the last '"'.
-    AdvanceSkipWhitespace();
-    return result;
-  }
-
-  int beg_pos = position_;
-  // Fast case for Latin1 only without escape characters.
-  do {
-    // Check for control character (0x00-0x1f) or unterminated string (<0).
-    if (c0_ < 0x20) return Handle<String>::null();
-    if (c0_ != '\\') {
-      if (seq_one_byte || c0_ <= String::kMaxOneByteCharCode) {
-        Advance();
-      } else {
-        return SlowScanJsonString<SeqTwoByteString, uc16>(source_,
-                                                          beg_pos,
-                                                          position_);
-      }
-    } else {
-      return SlowScanJsonString<SeqOneByteString, uint8_t>(source_,
-                                                           beg_pos,
-                                                           position_);
-    }
-  } while (c0_ != '"');
-  int length = position_ - beg_pos;
-  Handle<String> result =
-      factory()->NewRawOneByteString(length, pretenure_).ToHandleChecked();
-  uint8_t* dest = SeqOneByteString::cast(*result)->GetChars();
-  String::WriteToFlat(*source_, dest, beg_pos, position_);
-
-  DCHECK_EQ('"', c0_);
-  // Advance past the last '"'.
-  AdvanceSkipWhitespace();
-  return result;
-}
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/json-stringifier.cc b/src/json-stringifier.cc
new file mode 100644
index 0000000..29685c2
--- /dev/null
+++ b/src/json-stringifier.cc
@@ -0,0 +1,722 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/json-stringifier.h"
+
+#include "src/conversions.h"
+#include "src/lookup.h"
+#include "src/messages.h"
+#include "src/objects-inl.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Translation table to escape Latin1 characters.
+// Table entries start at a multiple of 8 and are null-terminated.
+const char* const JsonStringifier::JsonEscapeTable =
+    "\\u0000\0 \\u0001\0 \\u0002\0 \\u0003\0 "
+    "\\u0004\0 \\u0005\0 \\u0006\0 \\u0007\0 "
+    "\\b\0     \\t\0     \\n\0     \\u000b\0 "
+    "\\f\0     \\r\0     \\u000e\0 \\u000f\0 "
+    "\\u0010\0 \\u0011\0 \\u0012\0 \\u0013\0 "
+    "\\u0014\0 \\u0015\0 \\u0016\0 \\u0017\0 "
+    "\\u0018\0 \\u0019\0 \\u001a\0 \\u001b\0 "
+    "\\u001c\0 \\u001d\0 \\u001e\0 \\u001f\0 "
+    " \0      !\0      \\\"\0     #\0      "
+    "$\0      %\0      &\0      '\0      "
+    "(\0      )\0      *\0      +\0      "
+    ",\0      -\0      .\0      /\0      "
+    "0\0      1\0      2\0      3\0      "
+    "4\0      5\0      6\0      7\0      "
+    "8\0      9\0      :\0      ;\0      "
+    "<\0      =\0      >\0      ?\0      "
+    "@\0      A\0      B\0      C\0      "
+    "D\0      E\0      F\0      G\0      "
+    "H\0      I\0      J\0      K\0      "
+    "L\0      M\0      N\0      O\0      "
+    "P\0      Q\0      R\0      S\0      "
+    "T\0      U\0      V\0      W\0      "
+    "X\0      Y\0      Z\0      [\0      "
+    "\\\\\0     ]\0      ^\0      _\0      "
+    "`\0      a\0      b\0      c\0      "
+    "d\0      e\0      f\0      g\0      "
+    "h\0      i\0      j\0      k\0      "
+    "l\0      m\0      n\0      o\0      "
+    "p\0      q\0      r\0      s\0      "
+    "t\0      u\0      v\0      w\0      "
+    "x\0      y\0      z\0      {\0      "
+    "|\0      }\0      ~\0      \177\0      "
+    "\200\0      \201\0      \202\0      \203\0      "
+    "\204\0      \205\0      \206\0      \207\0      "
+    "\210\0      \211\0      \212\0      \213\0      "
+    "\214\0      \215\0      \216\0      \217\0      "
+    "\220\0      \221\0      \222\0      \223\0      "
+    "\224\0      \225\0      \226\0      \227\0      "
+    "\230\0      \231\0      \232\0      \233\0      "
+    "\234\0      \235\0      \236\0      \237\0      "
+    "\240\0      \241\0      \242\0      \243\0      "
+    "\244\0      \245\0      \246\0      \247\0      "
+    "\250\0      \251\0      \252\0      \253\0      "
+    "\254\0      \255\0      \256\0      \257\0      "
+    "\260\0      \261\0      \262\0      \263\0      "
+    "\264\0      \265\0      \266\0      \267\0      "
+    "\270\0      \271\0      \272\0      \273\0      "
+    "\274\0      \275\0      \276\0      \277\0      "
+    "\300\0      \301\0      \302\0      \303\0      "
+    "\304\0      \305\0      \306\0      \307\0      "
+    "\310\0      \311\0      \312\0      \313\0      "
+    "\314\0      \315\0      \316\0      \317\0      "
+    "\320\0      \321\0      \322\0      \323\0      "
+    "\324\0      \325\0      \326\0      \327\0      "
+    "\330\0      \331\0      \332\0      \333\0      "
+    "\334\0      \335\0      \336\0      \337\0      "
+    "\340\0      \341\0      \342\0      \343\0      "
+    "\344\0      \345\0      \346\0      \347\0      "
+    "\350\0      \351\0      \352\0      \353\0      "
+    "\354\0      \355\0      \356\0      \357\0      "
+    "\360\0      \361\0      \362\0      \363\0      "
+    "\364\0      \365\0      \366\0      \367\0      "
+    "\370\0      \371\0      \372\0      \373\0      "
+    "\374\0      \375\0      \376\0      \377\0      ";
+
+JsonStringifier::JsonStringifier(Isolate* isolate)
+    : isolate_(isolate), builder_(isolate), gap_(nullptr), indent_(0) {
+  tojson_string_ = factory()->toJSON_string();
+  stack_ = factory()->NewJSArray(8);
+}
+
+MaybeHandle<Object> JsonStringifier::Stringify(Handle<Object> object,
+                                               Handle<Object> replacer,
+                                               Handle<Object> gap) {
+  if (!InitializeReplacer(replacer)) return MaybeHandle<Object>();
+  if (!gap->IsUndefined(isolate_) && !InitializeGap(gap)) {
+    return MaybeHandle<Object>();
+  }
+  Result result = SerializeObject(object);
+  if (result == UNCHANGED) return factory()->undefined_value();
+  if (result == SUCCESS) return builder_.Finish();
+  DCHECK(result == EXCEPTION);
+  return MaybeHandle<Object>();
+}
+
+bool IsInList(Handle<String> key, List<Handle<String> >* list) {
+  // TODO(yangguo): This is O(n^2) for n properties in the list. Deal with this
+  // if this becomes an issue.
+  for (const Handle<String>& existing : *list) {
+    if (String::Equals(existing, key)) return true;
+  }
+  return false;
+}
+
+bool JsonStringifier::InitializeReplacer(Handle<Object> replacer) {
+  DCHECK(property_list_.is_null());
+  DCHECK(replacer_function_.is_null());
+  Maybe<bool> is_array = Object::IsArray(replacer);
+  if (is_array.IsNothing()) return false;
+  if (is_array.FromJust()) {
+    HandleScope handle_scope(isolate_);
+    List<Handle<String> > list;
+    Handle<Object> length_obj;
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+        isolate_, length_obj,
+        Object::GetLengthFromArrayLike(isolate_, replacer), false);
+    uint32_t length;
+    if (!length_obj->ToUint32(&length)) length = kMaxUInt32;
+    for (uint32_t i = 0; i < length; i++) {
+      Handle<Object> element;
+      Handle<String> key;
+      ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+          isolate_, element, Object::GetElement(isolate_, replacer, i), false);
+      if (element->IsNumber() || element->IsString()) {
+        ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+            isolate_, key, Object::ToString(isolate_, element), false);
+      } else if (element->IsJSValue()) {
+        Handle<Object> value(Handle<JSValue>::cast(element)->value(), isolate_);
+        if (value->IsNumber() || value->IsString()) {
+          ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+              isolate_, key, Object::ToString(isolate_, element), false);
+        }
+      }
+      if (key.is_null()) continue;
+      if (!IsInList(key, &list)) list.Add(key);
+    }
+    property_list_ = factory()->NewUninitializedFixedArray(list.length());
+    for (int i = 0; i < list.length(); i++) {
+      property_list_->set(i, *list[i]);
+    }
+    property_list_ = handle_scope.CloseAndEscape(property_list_);
+  } else if (replacer->IsCallable()) {
+    replacer_function_ = Handle<JSReceiver>::cast(replacer);
+  }
+  return true;
+}
+
+bool JsonStringifier::InitializeGap(Handle<Object> gap) {
+  DCHECK_NULL(gap_);
+  HandleScope scope(isolate_);
+  if (gap->IsJSValue()) {
+    Handle<Object> value(Handle<JSValue>::cast(gap)->value(), isolate_);
+    if (value->IsString()) {
+      ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, gap,
+                                       Object::ToString(isolate_, gap), false);
+    } else if (value->IsNumber()) {
+      ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, gap, Object::ToNumber(gap),
+                                       false);
+    }
+  }
+
+  if (gap->IsString()) {
+    Handle<String> gap_string = Handle<String>::cast(gap);
+    if (gap_string->length() > 0) {
+      int gap_length = std::min(gap_string->length(), 10);
+      gap_ = NewArray<uc16>(gap_length + 1);
+      String::WriteToFlat(*gap_string, gap_, 0, gap_length);
+      for (int i = 0; i < gap_length; i++) {
+        if (gap_[i] > String::kMaxOneByteCharCode) {
+          builder_.ChangeEncoding();
+          break;
+        }
+      }
+      gap_[gap_length] = '\0';
+    }
+  } else if (gap->IsNumber()) {
+    int num_value = DoubleToInt32(gap->Number());
+    if (num_value > 0) {
+      int gap_length = std::min(num_value, 10);
+      gap_ = NewArray<uc16>(gap_length + 1);
+      for (int i = 0; i < gap_length; i++) gap_[i] = ' ';
+      gap_[gap_length] = '\0';
+    }
+  }
+  return true;
+}
+
+MaybeHandle<Object> JsonStringifier::ApplyToJsonFunction(Handle<Object> object,
+                                                         Handle<Object> key) {
+  HandleScope scope(isolate_);
+  LookupIterator it(object, tojson_string_,
+                    LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+  Handle<Object> fun;
+  ASSIGN_RETURN_ON_EXCEPTION(isolate_, fun, Object::GetProperty(&it), Object);
+  if (!fun->IsCallable()) return object;
+
+  // Call toJSON function.
+  if (key->IsSmi()) key = factory()->NumberToString(key);
+  Handle<Object> argv[] = {key};
+  ASSIGN_RETURN_ON_EXCEPTION(isolate_, object,
+                             Execution::Call(isolate_, fun, object, 1, argv),
+                             Object);
+  return scope.CloseAndEscape(object);
+}
+
+MaybeHandle<Object> JsonStringifier::ApplyReplacerFunction(
+    Handle<Object> value, Handle<Object> key, Handle<Object> initial_holder) {
+  HandleScope scope(isolate_);
+  if (key->IsSmi()) key = factory()->NumberToString(key);
+  Handle<Object> argv[] = {key, value};
+  Handle<JSReceiver> holder = CurrentHolder(value, initial_holder);
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate_, value,
+      Execution::Call(isolate_, replacer_function_, holder, 2, argv), Object);
+  return scope.CloseAndEscape(value);
+}
+
+Handle<JSReceiver> JsonStringifier::CurrentHolder(
+    Handle<Object> value, Handle<Object> initial_holder) {
+  int length = Smi::cast(stack_->length())->value();
+  if (length == 0) {
+    Handle<JSObject> holder =
+        factory()->NewJSObject(isolate_->object_function());
+    JSObject::AddProperty(holder, factory()->empty_string(), initial_holder,
+                          NONE);
+    return holder;
+  } else {
+    FixedArray* elements = FixedArray::cast(stack_->elements());
+    return Handle<JSReceiver>(JSReceiver::cast(elements->get(length - 1)),
+                              isolate_);
+  }
+}
+
+JsonStringifier::Result JsonStringifier::StackPush(Handle<Object> object) {
+  StackLimitCheck check(isolate_);
+  if (check.HasOverflowed()) {
+    isolate_->StackOverflow();
+    return EXCEPTION;
+  }
+
+  int length = Smi::cast(stack_->length())->value();
+  {
+    DisallowHeapAllocation no_allocation;
+    FixedArray* elements = FixedArray::cast(stack_->elements());
+    for (int i = 0; i < length; i++) {
+      if (elements->get(i) == *object) {
+        AllowHeapAllocation allow_to_return_error;
+        Handle<Object> error =
+            factory()->NewTypeError(MessageTemplate::kCircularStructure);
+        isolate_->Throw(*error);
+        return EXCEPTION;
+      }
+    }
+  }
+  JSArray::SetLength(stack_, length + 1);
+  FixedArray::cast(stack_->elements())->set(length, *object);
+  return SUCCESS;
+}
+
+void JsonStringifier::StackPop() {
+  int length = Smi::cast(stack_->length())->value();
+  stack_->set_length(Smi::FromInt(length - 1));
+}
+
+template <bool deferred_string_key>
+JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
+                                                    bool comma,
+                                                    Handle<Object> key) {
+  StackLimitCheck interrupt_check(isolate_);
+  Handle<Object> initial_value = object;
+  if (interrupt_check.InterruptRequested() &&
+      isolate_->stack_guard()->HandleInterrupts()->IsException(isolate_)) {
+    return EXCEPTION;
+  }
+  if (object->IsJSReceiver()) {
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+        isolate_, object, ApplyToJsonFunction(object, key), EXCEPTION);
+  }
+  if (!replacer_function_.is_null()) {
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+        isolate_, object, ApplyReplacerFunction(object, key, initial_value),
+        EXCEPTION);
+  }
+
+  if (object->IsSmi()) {
+    if (deferred_string_key) SerializeDeferredKey(comma, key);
+    return SerializeSmi(Smi::cast(*object));
+  }
+
+  switch (HeapObject::cast(*object)->map()->instance_type()) {
+    case HEAP_NUMBER_TYPE:
+    case MUTABLE_HEAP_NUMBER_TYPE:
+      if (deferred_string_key) SerializeDeferredKey(comma, key);
+      return SerializeHeapNumber(Handle<HeapNumber>::cast(object));
+    case ODDBALL_TYPE:
+      switch (Oddball::cast(*object)->kind()) {
+        case Oddball::kFalse:
+          if (deferred_string_key) SerializeDeferredKey(comma, key);
+          builder_.AppendCString("false");
+          return SUCCESS;
+        case Oddball::kTrue:
+          if (deferred_string_key) SerializeDeferredKey(comma, key);
+          builder_.AppendCString("true");
+          return SUCCESS;
+        case Oddball::kNull:
+          if (deferred_string_key) SerializeDeferredKey(comma, key);
+          builder_.AppendCString("null");
+          return SUCCESS;
+        default:
+          return UNCHANGED;
+      }
+    case JS_ARRAY_TYPE:
+      if (deferred_string_key) SerializeDeferredKey(comma, key);
+      return SerializeJSArray(Handle<JSArray>::cast(object));
+    case JS_VALUE_TYPE:
+      if (deferred_string_key) SerializeDeferredKey(comma, key);
+      return SerializeJSValue(Handle<JSValue>::cast(object));
+    case SIMD128_VALUE_TYPE:
+    case SYMBOL_TYPE:
+      return UNCHANGED;
+    default:
+      if (object->IsString()) {
+        if (deferred_string_key) SerializeDeferredKey(comma, key);
+        SerializeString(Handle<String>::cast(object));
+        return SUCCESS;
+      } else {
+        DCHECK(object->IsJSReceiver());
+        if (object->IsCallable()) return UNCHANGED;
+        // Go to slow path for global proxy and objects requiring access checks.
+        if (deferred_string_key) SerializeDeferredKey(comma, key);
+        if (object->IsJSProxy()) {
+          return SerializeJSProxy(Handle<JSProxy>::cast(object));
+        }
+        return SerializeJSObject(Handle<JSObject>::cast(object));
+      }
+  }
+
+  UNREACHABLE();
+  return UNCHANGED;
+}
+
+JsonStringifier::Result JsonStringifier::SerializeJSValue(
+    Handle<JSValue> object) {
+  String* class_name = object->class_name();
+  if (class_name == isolate_->heap()->String_string()) {
+    Handle<Object> value;
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+        isolate_, value, Object::ToString(isolate_, object), EXCEPTION);
+    SerializeString(Handle<String>::cast(value));
+  } else if (class_name == isolate_->heap()->Number_string()) {
+    Handle<Object> value;
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, value, Object::ToNumber(object),
+                                     EXCEPTION);
+    if (value->IsSmi()) return SerializeSmi(Smi::cast(*value));
+    SerializeHeapNumber(Handle<HeapNumber>::cast(value));
+  } else if (class_name == isolate_->heap()->Boolean_string()) {
+    Object* value = JSValue::cast(*object)->value();
+    DCHECK(value->IsBoolean());
+    builder_.AppendCString(value->IsTrue(isolate_) ? "true" : "false");
+  } else {
+    // ES6 24.3.2.1 step 10.c, serialize as an ordinary JSObject.
+    return SerializeJSObject(object);
+  }
+  return SUCCESS;
+}
+
+JsonStringifier::Result JsonStringifier::SerializeSmi(Smi* object) {
+  static const int kBufferSize = 100;
+  char chars[kBufferSize];
+  Vector<char> buffer(chars, kBufferSize);
+  builder_.AppendCString(IntToCString(object->value(), buffer));
+  return SUCCESS;
+}
+
+JsonStringifier::Result JsonStringifier::SerializeDouble(double number) {
+  if (std::isinf(number) || std::isnan(number)) {
+    builder_.AppendCString("null");
+    return SUCCESS;
+  }
+  static const int kBufferSize = 100;
+  char chars[kBufferSize];
+  Vector<char> buffer(chars, kBufferSize);
+  builder_.AppendCString(DoubleToCString(number, buffer));
+  return SUCCESS;
+}
+
+JsonStringifier::Result JsonStringifier::SerializeJSArray(
+    Handle<JSArray> object) {
+  HandleScope handle_scope(isolate_);
+  Result stack_push = StackPush(object);
+  if (stack_push != SUCCESS) return stack_push;
+  uint32_t length = 0;
+  CHECK(object->length()->ToArrayLength(&length));
+  DCHECK(!object->IsAccessCheckNeeded());
+  builder_.AppendCharacter('[');
+  Indent();
+  uint32_t i = 0;
+  if (replacer_function_.is_null()) {
+    switch (object->GetElementsKind()) {
+      case FAST_SMI_ELEMENTS: {
+        Handle<FixedArray> elements(FixedArray::cast(object->elements()),
+                                    isolate_);
+        StackLimitCheck interrupt_check(isolate_);
+        while (i < length) {
+          if (interrupt_check.InterruptRequested() &&
+              isolate_->stack_guard()->HandleInterrupts()->IsException(
+                  isolate_)) {
+            return EXCEPTION;
+          }
+          Separator(i == 0);
+          SerializeSmi(Smi::cast(elements->get(i)));
+          i++;
+        }
+        break;
+      }
+      case FAST_DOUBLE_ELEMENTS: {
+        // Empty array is FixedArray but not FixedDoubleArray.
+        if (length == 0) break;
+        Handle<FixedDoubleArray> elements(
+            FixedDoubleArray::cast(object->elements()), isolate_);
+        StackLimitCheck interrupt_check(isolate_);
+        while (i < length) {
+          if (interrupt_check.InterruptRequested() &&
+              isolate_->stack_guard()->HandleInterrupts()->IsException(
+                  isolate_)) {
+            return EXCEPTION;
+          }
+          Separator(i == 0);
+          SerializeDouble(elements->get_scalar(i));
+          i++;
+        }
+        break;
+      }
+      case FAST_ELEMENTS: {
+        Handle<Object> old_length(object->length(), isolate_);
+        while (i < length) {
+          if (object->length() != *old_length ||
+              object->GetElementsKind() != FAST_ELEMENTS) {
+            // Fall back to slow path.
+            break;
+          }
+          Separator(i == 0);
+          Result result = SerializeElement(
+              isolate_,
+              Handle<Object>(FixedArray::cast(object->elements())->get(i),
+                             isolate_),
+              i);
+          if (result == UNCHANGED) {
+            builder_.AppendCString("null");
+          } else if (result != SUCCESS) {
+            return result;
+          }
+          i++;
+        }
+        break;
+      }
+      // The FAST_HOLEY_* cases could be handled in a faster way. They resemble
+      // the non-holey cases except that a lookup is necessary for holes.
+      default:
+        break;
+    }
+  }
+  if (i < length) {
+    // Slow path for non-fast elements and fall-back in edge case.
+    Result result = SerializeArrayLikeSlow(object, i, length);
+    if (result != SUCCESS) return result;
+  }
+  Unindent();
+  if (length > 0) NewLine();
+  builder_.AppendCharacter(']');
+  StackPop();
+  return SUCCESS;
+}
+
+JsonStringifier::Result JsonStringifier::SerializeArrayLikeSlow(
+    Handle<JSReceiver> object, uint32_t start, uint32_t length) {
+  // We need to write out at least two characters per array element.
+  static const int kMaxSerializableArrayLength = String::kMaxLength / 2;
+  if (length > kMaxSerializableArrayLength) {
+    isolate_->Throw(*isolate_->factory()->NewInvalidStringLengthError());
+    return EXCEPTION;
+  }
+  for (uint32_t i = start; i < length; i++) {
+    Separator(i == 0);
+    Handle<Object> element;
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+        isolate_, element, JSReceiver::GetElement(isolate_, object, i),
+        EXCEPTION);
+    Result result = SerializeElement(isolate_, element, i);
+    if (result == SUCCESS) continue;
+    if (result == UNCHANGED) {
+      // Detect overflow sooner for large sparse arrays.
+      if (builder_.HasOverflowed()) return EXCEPTION;
+      builder_.AppendCString("null");
+    } else {
+      return result;
+    }
+  }
+  return SUCCESS;
+}
+
+JsonStringifier::Result JsonStringifier::SerializeJSObject(
+    Handle<JSObject> object) {
+  HandleScope handle_scope(isolate_);
+  Result stack_push = StackPush(object);
+  if (stack_push != SUCCESS) return stack_push;
+
+  if (property_list_.is_null() &&
+      object->map()->instance_type() > LAST_CUSTOM_ELEMENTS_RECEIVER &&
+      object->HasFastProperties() &&
+      Handle<JSObject>::cast(object)->elements()->length() == 0) {
+    DCHECK(object->IsJSObject());
+    DCHECK(!object->IsJSGlobalProxy());
+    Handle<JSObject> js_obj = Handle<JSObject>::cast(object);
+    DCHECK(!js_obj->HasIndexedInterceptor());
+    DCHECK(!js_obj->HasNamedInterceptor());
+    Handle<Map> map(js_obj->map());
+    builder_.AppendCharacter('{');
+    Indent();
+    bool comma = false;
+    for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+      Handle<Name> name(map->instance_descriptors()->GetKey(i), isolate_);
+      // TODO(rossberg): Should this throw?
+      if (!name->IsString()) continue;
+      Handle<String> key = Handle<String>::cast(name);
+      PropertyDetails details = map->instance_descriptors()->GetDetails(i);
+      if (details.IsDontEnum()) continue;
+      Handle<Object> property;
+      if (details.type() == DATA && *map == js_obj->map()) {
+        FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
+        property = JSObject::FastPropertyAt(js_obj, details.representation(),
+                                            field_index);
+      } else {
+        ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+            isolate_, property, Object::GetPropertyOrElement(js_obj, key),
+            EXCEPTION);
+      }
+      Result result = SerializeProperty(property, comma, key);
+      if (!comma && result == SUCCESS) comma = true;
+      if (result == EXCEPTION) return result;
+    }
+    Unindent();
+    if (comma) NewLine();
+    builder_.AppendCharacter('}');
+  } else {
+    Result result = SerializeJSReceiverSlow(object);
+    if (result != SUCCESS) return result;
+  }
+  StackPop();
+  return SUCCESS;
+}
+
+JsonStringifier::Result JsonStringifier::SerializeJSReceiverSlow(
+    Handle<JSReceiver> object) {
+  Handle<FixedArray> contents = property_list_;
+  if (contents.is_null()) {
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+        isolate_, contents,
+        KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly,
+                                ENUMERABLE_STRINGS,
+                                GetKeysConversion::kConvertToString),
+        EXCEPTION);
+  }
+  builder_.AppendCharacter('{');
+  Indent();
+  bool comma = false;
+  for (int i = 0; i < contents->length(); i++) {
+    Handle<String> key(String::cast(contents->get(i)), isolate_);
+    Handle<Object> property;
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, property,
+                                     Object::GetPropertyOrElement(object, key),
+                                     EXCEPTION);
+    Result result = SerializeProperty(property, comma, key);
+    if (!comma && result == SUCCESS) comma = true;
+    if (result == EXCEPTION) return result;
+  }
+  Unindent();
+  if (comma) NewLine();
+  builder_.AppendCharacter('}');
+  return SUCCESS;
+}
+
+JsonStringifier::Result JsonStringifier::SerializeJSProxy(
+    Handle<JSProxy> object) {
+  HandleScope scope(isolate_);
+  Result stack_push = StackPush(object);
+  if (stack_push != SUCCESS) return stack_push;
+  Maybe<bool> is_array = Object::IsArray(object);
+  if (is_array.IsNothing()) return EXCEPTION;
+  if (is_array.FromJust()) {
+    Handle<Object> length_object;
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+        isolate_, length_object,
+        Object::GetLengthFromArrayLike(isolate_, object), EXCEPTION);
+    uint32_t length;
+    if (!length_object->ToUint32(&length)) {
+      // Technically, we need to be able to handle lengths outside the
+      // uint32_t range. However, we would run into string size overflow
+      // if we tried to stringify such an array.
+      isolate_->Throw(*isolate_->factory()->NewInvalidStringLengthError());
+      return EXCEPTION;
+    }
+    builder_.AppendCharacter('[');
+    Indent();
+    Result result = SerializeArrayLikeSlow(object, 0, length);
+    if (result != SUCCESS) return result;
+    Unindent();
+    if (length > 0) NewLine();
+    builder_.AppendCharacter(']');
+  } else {
+    Result result = SerializeJSReceiverSlow(object);
+    if (result != SUCCESS) return result;
+  }
+  StackPop();
+  return SUCCESS;
+}
+
+template <typename SrcChar, typename DestChar>
+void JsonStringifier::SerializeStringUnchecked_(
+    Vector<const SrcChar> src,
+    IncrementalStringBuilder::NoExtend<DestChar>* dest) {
+  // Assert that uc16 character is not truncated down to 8 bit.
+  // The <uc16, char> version of this method must not be called.
+  DCHECK(sizeof(DestChar) >= sizeof(SrcChar));
+
+  for (int i = 0; i < src.length(); i++) {
+    SrcChar c = src[i];
+    if (DoNotEscape(c)) {
+      dest->Append(c);
+    } else {
+      dest->AppendCString(&JsonEscapeTable[c * kJsonEscapeTableEntrySize]);
+    }
+  }
+}
+
+template <typename SrcChar, typename DestChar>
+void JsonStringifier::SerializeString_(Handle<String> string) {
+  int length = string->length();
+  builder_.Append<uint8_t, DestChar>('"');
+  // We make a rough estimate to find out if the current string can be
+  // serialized without allocating a new string part. The worst case length of
+  // an escaped character is 6.  Shifting the remaining string length left by 3
+  // is a more pessimistic estimate, but faster to calculate.
+  int worst_case_length = length << 3;
+  if (builder_.CurrentPartCanFit(worst_case_length)) {
+    DisallowHeapAllocation no_gc;
+    Vector<const SrcChar> vector = string->GetCharVector<SrcChar>();
+    IncrementalStringBuilder::NoExtendBuilder<DestChar> no_extend(
+        &builder_, worst_case_length);
+    SerializeStringUnchecked_(vector, &no_extend);
+  } else {
+    FlatStringReader reader(isolate_, string);
+    for (int i = 0; i < reader.length(); i++) {
+      SrcChar c = reader.Get<SrcChar>(i);
+      if (DoNotEscape(c)) {
+        builder_.Append<SrcChar, DestChar>(c);
+      } else {
+        builder_.AppendCString(&JsonEscapeTable[c * kJsonEscapeTableEntrySize]);
+      }
+    }
+  }
+
+  builder_.Append<uint8_t, DestChar>('"');
+}
+
+template <>
+bool JsonStringifier::DoNotEscape(uint8_t c) {
+  return c >= '#' && c <= '~' && c != '\\';
+}
+
+template <>
+bool JsonStringifier::DoNotEscape(uint16_t c) {
+  return c >= '#' && c != '\\' && c != 0x7f;
+}
+
+void JsonStringifier::NewLine() {
+  if (gap_ == nullptr) return;
+  builder_.AppendCharacter('\n');
+  for (int i = 0; i < indent_; i++) builder_.AppendCString(gap_);
+}
+
+void JsonStringifier::Separator(bool first) {
+  if (!first) builder_.AppendCharacter(',');
+  NewLine();
+}
+
+void JsonStringifier::SerializeDeferredKey(bool deferred_comma,
+                                           Handle<Object> deferred_key) {
+  Separator(!deferred_comma);
+  SerializeString(Handle<String>::cast(deferred_key));
+  builder_.AppendCharacter(':');
+  if (gap_ != nullptr) builder_.AppendCharacter(' ');
+}
+
+void JsonStringifier::SerializeString(Handle<String> object) {
+  object = String::Flatten(object);
+  if (builder_.CurrentEncoding() == String::ONE_BYTE_ENCODING) {
+    if (object->IsOneByteRepresentationUnderneath()) {
+      SerializeString_<uint8_t, uint8_t>(object);
+    } else {
+      builder_.ChangeEncoding();
+      SerializeString(object);
+    }
+  } else {
+    if (object->IsOneByteRepresentationUnderneath()) {
+      SerializeString_<uint8_t, uc16>(object);
+    } else {
+      SerializeString_<uc16, uc16>(object);
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/json-stringifier.h b/src/json-stringifier.h
index b40a782..e72bd9d 100644
--- a/src/json-stringifier.h
+++ b/src/json-stringifier.h
@@ -5,36 +5,33 @@
 #ifndef V8_JSON_STRINGIFIER_H_
 #define V8_JSON_STRINGIFIER_H_
 
-#include "src/conversions.h"
-#include "src/lookup.h"
-#include "src/messages.h"
+#include "src/objects.h"
 #include "src/string-builder.h"
-#include "src/utils.h"
 
 namespace v8 {
 namespace internal {
 
-class BasicJsonStringifier BASE_EMBEDDED {
+class JsonStringifier BASE_EMBEDDED {
  public:
-  explicit BasicJsonStringifier(Isolate* isolate);
+  explicit JsonStringifier(Isolate* isolate);
 
-  MUST_USE_RESULT MaybeHandle<Object> Stringify(Handle<Object> object);
+  ~JsonStringifier() { DeleteArray(gap_); }
 
-  MUST_USE_RESULT INLINE(static MaybeHandle<Object> StringifyString(
-      Isolate* isolate,
-      Handle<String> object));
+  MUST_USE_RESULT MaybeHandle<Object> Stringify(Handle<Object> object,
+                                                Handle<Object> replacer,
+                                                Handle<Object> gap);
 
  private:
   enum Result { UNCHANGED, SUCCESS, EXCEPTION };
 
+  bool InitializeReplacer(Handle<Object> replacer);
+  bool InitializeGap(Handle<Object> gap);
+
   MUST_USE_RESULT MaybeHandle<Object> ApplyToJsonFunction(
       Handle<Object> object,
       Handle<Object> key);
-
-  Result SerializeGeneric(Handle<Object> object,
-                          Handle<Object> key,
-                          bool deferred_comma,
-                          bool deferred_key);
+  MUST_USE_RESULT MaybeHandle<Object> ApplyReplacerFunction(
+      Handle<Object> value, Handle<Object> key, Handle<Object> initial_holder);
 
   // Entry point to serialize the object.
   INLINE(Result SerializeObject(Handle<Object> obj)) {
@@ -64,11 +61,8 @@
   template <bool deferred_string_key>
   Result Serialize_(Handle<Object> object, bool comma, Handle<Object> key);
 
-  void SerializeDeferredKey(bool deferred_comma, Handle<Object> deferred_key) {
-    if (deferred_comma) builder_.AppendCharacter(',');
-    SerializeString(Handle<String>::cast(deferred_key));
-    builder_.AppendCharacter(':');
-  }
+  INLINE(void SerializeDeferredKey(bool deferred_comma,
+                                   Handle<Object> deferred_key));
 
   Result SerializeSmi(Smi* object);
 
@@ -82,8 +76,10 @@
   INLINE(Result SerializeJSArray(Handle<JSArray> object));
   INLINE(Result SerializeJSObject(Handle<JSObject> object));
 
-  Result SerializeJSArraySlow(Handle<JSArray> object, uint32_t start,
-                              uint32_t length);
+  Result SerializeJSProxy(Handle<JSProxy> object);
+  Result SerializeJSReceiverSlow(Handle<JSReceiver> object);
+  Result SerializeArrayLikeSlow(Handle<JSReceiver> object, uint32_t start,
+                                uint32_t length);
 
   void SerializeString(Handle<String> object);
 
@@ -98,6 +94,14 @@
   template <typename Char>
   INLINE(static bool DoNotEscape(Char c));
 
+  INLINE(void NewLine());
+  INLINE(void Indent() { indent_++; });
+  INLINE(void Unindent() { indent_--; });
+  INLINE(void Separator(bool first));
+
+  Handle<JSReceiver> CurrentHolder(Handle<Object> value,
+                                   Handle<Object> initial_holder);
+
   Result StackPush(Handle<Object> object);
   void StackPop();
 
@@ -107,579 +111,15 @@
   IncrementalStringBuilder builder_;
   Handle<String> tojson_string_;
   Handle<JSArray> stack_;
+  Handle<FixedArray> property_list_;
+  Handle<JSReceiver> replacer_function_;
+  uc16* gap_;
+  int indent_;
 
   static const int kJsonEscapeTableEntrySize = 8;
   static const char* const JsonEscapeTable;
 };
 
-
-// Translation table to escape Latin1 characters.
-// Table entries start at a multiple of 8 and are null-terminated.
-const char* const BasicJsonStringifier::JsonEscapeTable =
-    "\\u0000\0 \\u0001\0 \\u0002\0 \\u0003\0 "
-    "\\u0004\0 \\u0005\0 \\u0006\0 \\u0007\0 "
-    "\\b\0     \\t\0     \\n\0     \\u000b\0 "
-    "\\f\0     \\r\0     \\u000e\0 \\u000f\0 "
-    "\\u0010\0 \\u0011\0 \\u0012\0 \\u0013\0 "
-    "\\u0014\0 \\u0015\0 \\u0016\0 \\u0017\0 "
-    "\\u0018\0 \\u0019\0 \\u001a\0 \\u001b\0 "
-    "\\u001c\0 \\u001d\0 \\u001e\0 \\u001f\0 "
-    " \0      !\0      \\\"\0     #\0      "
-    "$\0      %\0      &\0      '\0      "
-    "(\0      )\0      *\0      +\0      "
-    ",\0      -\0      .\0      /\0      "
-    "0\0      1\0      2\0      3\0      "
-    "4\0      5\0      6\0      7\0      "
-    "8\0      9\0      :\0      ;\0      "
-    "<\0      =\0      >\0      ?\0      "
-    "@\0      A\0      B\0      C\0      "
-    "D\0      E\0      F\0      G\0      "
-    "H\0      I\0      J\0      K\0      "
-    "L\0      M\0      N\0      O\0      "
-    "P\0      Q\0      R\0      S\0      "
-    "T\0      U\0      V\0      W\0      "
-    "X\0      Y\0      Z\0      [\0      "
-    "\\\\\0     ]\0      ^\0      _\0      "
-    "`\0      a\0      b\0      c\0      "
-    "d\0      e\0      f\0      g\0      "
-    "h\0      i\0      j\0      k\0      "
-    "l\0      m\0      n\0      o\0      "
-    "p\0      q\0      r\0      s\0      "
-    "t\0      u\0      v\0      w\0      "
-    "x\0      y\0      z\0      {\0      "
-    "|\0      }\0      ~\0      \177\0      "
-    "\200\0      \201\0      \202\0      \203\0      "
-    "\204\0      \205\0      \206\0      \207\0      "
-    "\210\0      \211\0      \212\0      \213\0      "
-    "\214\0      \215\0      \216\0      \217\0      "
-    "\220\0      \221\0      \222\0      \223\0      "
-    "\224\0      \225\0      \226\0      \227\0      "
-    "\230\0      \231\0      \232\0      \233\0      "
-    "\234\0      \235\0      \236\0      \237\0      "
-    "\240\0      \241\0      \242\0      \243\0      "
-    "\244\0      \245\0      \246\0      \247\0      "
-    "\250\0      \251\0      \252\0      \253\0      "
-    "\254\0      \255\0      \256\0      \257\0      "
-    "\260\0      \261\0      \262\0      \263\0      "
-    "\264\0      \265\0      \266\0      \267\0      "
-    "\270\0      \271\0      \272\0      \273\0      "
-    "\274\0      \275\0      \276\0      \277\0      "
-    "\300\0      \301\0      \302\0      \303\0      "
-    "\304\0      \305\0      \306\0      \307\0      "
-    "\310\0      \311\0      \312\0      \313\0      "
-    "\314\0      \315\0      \316\0      \317\0      "
-    "\320\0      \321\0      \322\0      \323\0      "
-    "\324\0      \325\0      \326\0      \327\0      "
-    "\330\0      \331\0      \332\0      \333\0      "
-    "\334\0      \335\0      \336\0      \337\0      "
-    "\340\0      \341\0      \342\0      \343\0      "
-    "\344\0      \345\0      \346\0      \347\0      "
-    "\350\0      \351\0      \352\0      \353\0      "
-    "\354\0      \355\0      \356\0      \357\0      "
-    "\360\0      \361\0      \362\0      \363\0      "
-    "\364\0      \365\0      \366\0      \367\0      "
-    "\370\0      \371\0      \372\0      \373\0      "
-    "\374\0      \375\0      \376\0      \377\0      ";
-
-
-BasicJsonStringifier::BasicJsonStringifier(Isolate* isolate)
-    : isolate_(isolate), builder_(isolate) {
-  tojson_string_ = factory()->toJSON_string();
-  stack_ = factory()->NewJSArray(8);
-}
-
-
-MaybeHandle<Object> BasicJsonStringifier::Stringify(Handle<Object> object) {
-  Result result = SerializeObject(object);
-  if (result == UNCHANGED) return factory()->undefined_value();
-  if (result == SUCCESS) return builder_.Finish();
-  DCHECK(result == EXCEPTION);
-  return MaybeHandle<Object>();
-}
-
-
-MaybeHandle<Object> BasicJsonStringifier::StringifyString(
-    Isolate* isolate,  Handle<String> object) {
-  static const int kJsonQuoteWorstCaseBlowup = 6;
-  static const int kSpaceForQuotes = 2;
-  int worst_case_length =
-      object->length() * kJsonQuoteWorstCaseBlowup + kSpaceForQuotes;
-
-  if (worst_case_length > 32 * KB) {  // Slow path if too large.
-    BasicJsonStringifier stringifier(isolate);
-    return stringifier.Stringify(object);
-  }
-
-  object = String::Flatten(object);
-  DCHECK(object->IsFlat());
-  Handle<SeqString> result;
-  if (object->IsOneByteRepresentationUnderneath()) {
-    result = isolate->factory()
-                 ->NewRawOneByteString(worst_case_length)
-                 .ToHandleChecked();
-    IncrementalStringBuilder::NoExtendString<uint8_t> no_extend(
-        result, worst_case_length);
-    no_extend.Append('\"');
-    SerializeStringUnchecked_(object->GetFlatContent().ToOneByteVector(),
-                              &no_extend);
-    no_extend.Append('\"');
-    return no_extend.Finalize();
-  } else {
-    result = isolate->factory()
-                 ->NewRawTwoByteString(worst_case_length)
-                 .ToHandleChecked();
-    IncrementalStringBuilder::NoExtendString<uc16> no_extend(result,
-                                                             worst_case_length);
-    no_extend.Append('\"');
-    SerializeStringUnchecked_(object->GetFlatContent().ToUC16Vector(),
-                              &no_extend);
-    no_extend.Append('\"');
-    return no_extend.Finalize();
-  }
-}
-
-
-MaybeHandle<Object> BasicJsonStringifier::ApplyToJsonFunction(
-    Handle<Object> object, Handle<Object> key) {
-  LookupIterator it(object, tojson_string_,
-                    LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
-  Handle<Object> fun;
-  ASSIGN_RETURN_ON_EXCEPTION(isolate_, fun, Object::GetProperty(&it), Object);
-  if (!fun->IsCallable()) return object;
-
-  // Call toJSON function.
-  if (key->IsSmi()) key = factory()->NumberToString(key);
-  Handle<Object> argv[] = { key };
-  HandleScope scope(isolate_);
-  ASSIGN_RETURN_ON_EXCEPTION(
-      isolate_, object,
-      Execution::Call(isolate_, fun, object, 1, argv),
-      Object);
-  return scope.CloseAndEscape(object);
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::StackPush(
-    Handle<Object> object) {
-  StackLimitCheck check(isolate_);
-  if (check.HasOverflowed()) {
-    isolate_->StackOverflow();
-    return EXCEPTION;
-  }
-
-  int length = Smi::cast(stack_->length())->value();
-  {
-    DisallowHeapAllocation no_allocation;
-    FixedArray* elements = FixedArray::cast(stack_->elements());
-    for (int i = 0; i < length; i++) {
-      if (elements->get(i) == *object) {
-        AllowHeapAllocation allow_to_return_error;
-        Handle<Object> error =
-            factory()->NewTypeError(MessageTemplate::kCircularStructure);
-        isolate_->Throw(*error);
-        return EXCEPTION;
-      }
-    }
-  }
-  JSArray::SetLength(stack_, length + 1);
-  FixedArray::cast(stack_->elements())->set(length, *object);
-  return SUCCESS;
-}
-
-
-void BasicJsonStringifier::StackPop() {
-  int length = Smi::cast(stack_->length())->value();
-  stack_->set_length(Smi::FromInt(length - 1));
-}
-
-
-template <bool deferred_string_key>
-BasicJsonStringifier::Result BasicJsonStringifier::Serialize_(
-    Handle<Object> object, bool comma, Handle<Object> key) {
-  if (object->IsJSObject()) {
-    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-        isolate_, object,
-        ApplyToJsonFunction(object, key),
-        EXCEPTION);
-  }
-
-  if (object->IsSmi()) {
-    if (deferred_string_key) SerializeDeferredKey(comma, key);
-    return SerializeSmi(Smi::cast(*object));
-  }
-
-  switch (HeapObject::cast(*object)->map()->instance_type()) {
-    case HEAP_NUMBER_TYPE:
-    case MUTABLE_HEAP_NUMBER_TYPE:
-      if (deferred_string_key) SerializeDeferredKey(comma, key);
-      return SerializeHeapNumber(Handle<HeapNumber>::cast(object));
-    case ODDBALL_TYPE:
-      switch (Oddball::cast(*object)->kind()) {
-        case Oddball::kFalse:
-          if (deferred_string_key) SerializeDeferredKey(comma, key);
-          builder_.AppendCString("false");
-          return SUCCESS;
-        case Oddball::kTrue:
-          if (deferred_string_key) SerializeDeferredKey(comma, key);
-          builder_.AppendCString("true");
-          return SUCCESS;
-        case Oddball::kNull:
-          if (deferred_string_key) SerializeDeferredKey(comma, key);
-          builder_.AppendCString("null");
-          return SUCCESS;
-        default:
-          return UNCHANGED;
-      }
-    case JS_ARRAY_TYPE:
-      if (object->IsAccessCheckNeeded()) break;
-      if (deferred_string_key) SerializeDeferredKey(comma, key);
-      return SerializeJSArray(Handle<JSArray>::cast(object));
-    case JS_VALUE_TYPE:
-      if (deferred_string_key) SerializeDeferredKey(comma, key);
-      return SerializeJSValue(Handle<JSValue>::cast(object));
-    default:
-      if (object->IsString()) {
-        if (deferred_string_key) SerializeDeferredKey(comma, key);
-        SerializeString(Handle<String>::cast(object));
-        return SUCCESS;
-      } else if (object->IsJSObject()) {
-        if (object->IsCallable()) return UNCHANGED;
-        // Go to slow path for global proxy and objects requiring access checks.
-        if (object->IsAccessCheckNeeded() || object->IsJSGlobalProxy()) break;
-        if (deferred_string_key) SerializeDeferredKey(comma, key);
-        return SerializeJSObject(Handle<JSObject>::cast(object));
-      }
-  }
-
-  return SerializeGeneric(object, key, comma, deferred_string_key);
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeGeneric(
-    Handle<Object> object,
-    Handle<Object> key,
-    bool deferred_comma,
-    bool deferred_key) {
-  Handle<JSFunction> fun = isolate_->json_serialize_adapter();
-  Handle<Object> argv[] = { key, object };
-  Handle<Object> result;
-  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-      isolate_, result, Execution::Call(isolate_, fun, object, 2, argv),
-      EXCEPTION);
-  if (result->IsUndefined()) return UNCHANGED;
-  if (deferred_key) {
-    if (key->IsSmi()) key = factory()->NumberToString(key);
-    SerializeDeferredKey(deferred_comma, key);
-  }
-
-  builder_.AppendString(Handle<String>::cast(result));
-  return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSValue(
-    Handle<JSValue> object) {
-  String* class_name = object->class_name();
-  if (class_name == isolate_->heap()->String_string()) {
-    Handle<Object> value;
-    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-        isolate_, value, Object::ToString(isolate_, object), EXCEPTION);
-    SerializeString(Handle<String>::cast(value));
-  } else if (class_name == isolate_->heap()->Number_string()) {
-    Handle<Object> value;
-    ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, value, Object::ToNumber(object),
-                                     EXCEPTION);
-    if (value->IsSmi()) return SerializeSmi(Smi::cast(*value));
-    SerializeHeapNumber(Handle<HeapNumber>::cast(value));
-  } else if (class_name == isolate_->heap()->Boolean_string()) {
-    Object* value = JSValue::cast(*object)->value();
-    DCHECK(value->IsBoolean());
-    builder_.AppendCString(value->IsTrue() ? "true" : "false");
-  } else {
-    // ES6 24.3.2.1 step 10.c, serialize as an ordinary JSObject.
-    CHECK(!object->IsAccessCheckNeeded());
-    CHECK(!object->IsJSGlobalProxy());
-    return SerializeJSObject(object);
-  }
-  return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeSmi(Smi* object) {
-  static const int kBufferSize = 100;
-  char chars[kBufferSize];
-  Vector<char> buffer(chars, kBufferSize);
-  builder_.AppendCString(IntToCString(object->value(), buffer));
-  return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeDouble(
-    double number) {
-  if (std::isinf(number) || std::isnan(number)) {
-    builder_.AppendCString("null");
-    return SUCCESS;
-  }
-  static const int kBufferSize = 100;
-  char chars[kBufferSize];
-  Vector<char> buffer(chars, kBufferSize);
-  builder_.AppendCString(DoubleToCString(number, buffer));
-  return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArray(
-    Handle<JSArray> object) {
-  HandleScope handle_scope(isolate_);
-  Result stack_push = StackPush(object);
-  if (stack_push != SUCCESS) return stack_push;
-  uint32_t length = 0;
-  CHECK(object->length()->ToArrayLength(&length));
-  builder_.AppendCharacter('[');
-  switch (object->GetElementsKind()) {
-    case FAST_SMI_ELEMENTS: {
-      Handle<FixedArray> elements(FixedArray::cast(object->elements()),
-                                  isolate_);
-      for (uint32_t i = 0; i < length; i++) {
-        if (i > 0) builder_.AppendCharacter(',');
-        SerializeSmi(Smi::cast(elements->get(i)));
-      }
-      break;
-    }
-    case FAST_DOUBLE_ELEMENTS: {
-      // Empty array is FixedArray but not FixedDoubleArray.
-      if (length == 0) break;
-      Handle<FixedDoubleArray> elements(
-          FixedDoubleArray::cast(object->elements()), isolate_);
-      for (uint32_t i = 0; i < length; i++) {
-        if (i > 0) builder_.AppendCharacter(',');
-        SerializeDouble(elements->get_scalar(i));
-      }
-      break;
-    }
-    case FAST_ELEMENTS: {
-      Handle<Object> old_length(object->length(), isolate_);
-      for (uint32_t i = 0; i < length; i++) {
-        if (object->length() != *old_length ||
-            object->GetElementsKind() != FAST_ELEMENTS) {
-          Result result = SerializeJSArraySlow(object, i, length);
-          if (result != SUCCESS) return result;
-          break;
-        }
-        if (i > 0) builder_.AppendCharacter(',');
-        Result result = SerializeElement(
-            isolate_,
-            Handle<Object>(FixedArray::cast(object->elements())->get(i),
-                           isolate_),
-            i);
-        if (result == SUCCESS) continue;
-        if (result == UNCHANGED) {
-          builder_.AppendCString("null");
-        } else {
-          return result;
-        }
-      }
-      break;
-    }
-    // The FAST_HOLEY_* cases could be handled in a faster way. They resemble
-    // the non-holey cases except that a lookup is necessary for holes.
-    default: {
-      Result result = SerializeJSArraySlow(object, 0, length);
-      if (result != SUCCESS) return result;
-      break;
-    }
-  }
-  builder_.AppendCharacter(']');
-  StackPop();
-  return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArraySlow(
-    Handle<JSArray> object, uint32_t start, uint32_t length) {
-  for (uint32_t i = start; i < length; i++) {
-    if (i > 0) builder_.AppendCharacter(',');
-    Handle<Object> element;
-    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-        isolate_, element, JSReceiver::GetElement(isolate_, object, i),
-        EXCEPTION);
-    if (element->IsUndefined()) {
-      builder_.AppendCString("null");
-    } else {
-      Result result = SerializeElement(isolate_, element, i);
-      if (result == SUCCESS) continue;
-      if (result == UNCHANGED) {
-        builder_.AppendCString("null");
-      } else {
-        return result;
-      }
-    }
-  }
-  return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
-    Handle<JSObject> object) {
-  HandleScope handle_scope(isolate_);
-  Result stack_push = StackPush(object);
-  if (stack_push != SUCCESS) return stack_push;
-  DCHECK(!object->IsJSGlobalProxy() && !object->IsJSGlobalObject());
-
-  builder_.AppendCharacter('{');
-  bool comma = false;
-
-  if (object->HasFastProperties() &&
-      !object->HasIndexedInterceptor() &&
-      !object->HasNamedInterceptor() &&
-      object->elements()->length() == 0) {
-    Handle<Map> map(object->map());
-    for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
-      Handle<Name> name(map->instance_descriptors()->GetKey(i), isolate_);
-      // TODO(rossberg): Should this throw?
-      if (!name->IsString()) continue;
-      Handle<String> key = Handle<String>::cast(name);
-      PropertyDetails details = map->instance_descriptors()->GetDetails(i);
-      if (details.IsDontEnum()) continue;
-      Handle<Object> property;
-      if (details.type() == DATA && *map == object->map()) {
-        FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
-        Isolate* isolate = object->GetIsolate();
-        if (object->IsUnboxedDoubleField(field_index)) {
-          double value = object->RawFastDoublePropertyAt(field_index);
-          property = isolate->factory()->NewHeapNumber(value);
-
-        } else {
-          property = handle(object->RawFastPropertyAt(field_index), isolate);
-        }
-      } else {
-        ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-            isolate_, property,
-            Object::GetPropertyOrElement(object, key),
-            EXCEPTION);
-      }
-      Result result = SerializeProperty(property, comma, key);
-      if (!comma && result == SUCCESS) comma = true;
-      if (result == EXCEPTION) return result;
-    }
-  } else {
-    Handle<FixedArray> contents;
-    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-        isolate_, contents,
-        JSReceiver::GetKeys(object, OWN_ONLY, ENUMERABLE_STRINGS), EXCEPTION);
-
-    for (int i = 0; i < contents->length(); i++) {
-      Object* key = contents->get(i);
-      Handle<String> key_handle;
-      MaybeHandle<Object> maybe_property;
-      if (key->IsString()) {
-        key_handle = Handle<String>(String::cast(key), isolate_);
-        maybe_property = Object::GetPropertyOrElement(object, key_handle);
-      } else {
-        DCHECK(key->IsNumber());
-        key_handle = factory()->NumberToString(Handle<Object>(key, isolate_));
-        if (key->IsSmi()) {
-          maybe_property =
-              JSReceiver::GetElement(isolate_, object, Smi::cast(key)->value());
-        } else {
-          maybe_property = Object::GetPropertyOrElement(object, key_handle);
-        }
-      }
-      Handle<Object> property;
-      ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-          isolate_, property, maybe_property, EXCEPTION);
-      Result result = SerializeProperty(property, comma, key_handle);
-      if (!comma && result == SUCCESS) comma = true;
-      if (result == EXCEPTION) return result;
-    }
-  }
-
-  builder_.AppendCharacter('}');
-  StackPop();
-  return SUCCESS;
-}
-
-
-template <typename SrcChar, typename DestChar>
-void BasicJsonStringifier::SerializeStringUnchecked_(
-    Vector<const SrcChar> src,
-    IncrementalStringBuilder::NoExtend<DestChar>* dest) {
-  // Assert that uc16 character is not truncated down to 8 bit.
-  // The <uc16, char> version of this method must not be called.
-  DCHECK(sizeof(DestChar) >= sizeof(SrcChar));
-
-  for (int i = 0; i < src.length(); i++) {
-    SrcChar c = src[i];
-    if (DoNotEscape(c)) {
-      dest->Append(c);
-    } else {
-      dest->AppendCString(&JsonEscapeTable[c * kJsonEscapeTableEntrySize]);
-    }
-  }
-}
-
-
-template <typename SrcChar, typename DestChar>
-void BasicJsonStringifier::SerializeString_(Handle<String> string) {
-  int length = string->length();
-  builder_.Append<uint8_t, DestChar>('"');
-  // We make a rough estimate to find out if the current string can be
-  // serialized without allocating a new string part. The worst case length of
-  // an escaped character is 6.  Shifting the remainin string length right by 3
-  // is a more pessimistic estimate, but faster to calculate.
-  int worst_case_length = length << 3;
-  if (builder_.CurrentPartCanFit(worst_case_length)) {
-    DisallowHeapAllocation no_gc;
-    Vector<const SrcChar> vector = string->GetCharVector<SrcChar>();
-    IncrementalStringBuilder::NoExtendBuilder<DestChar> no_extend(
-        &builder_, worst_case_length);
-    SerializeStringUnchecked_(vector, &no_extend);
-  } else {
-    FlatStringReader reader(isolate_, string);
-    for (int i = 0; i < reader.length(); i++) {
-      SrcChar c = reader.Get<SrcChar>(i);
-      if (DoNotEscape(c)) {
-        builder_.Append<SrcChar, DestChar>(c);
-      } else {
-        builder_.AppendCString(&JsonEscapeTable[c * kJsonEscapeTableEntrySize]);
-      }
-    }
-  }
-
-  builder_.Append<uint8_t, DestChar>('"');
-}
-
-
-template <>
-bool BasicJsonStringifier::DoNotEscape(uint8_t c) {
-  return c >= '#' && c <= '~' && c != '\\';
-}
-
-
-template <>
-bool BasicJsonStringifier::DoNotEscape(uint16_t c) {
-  return c >= '#' && c != '\\' && c != 0x7f;
-}
-
-
-void BasicJsonStringifier::SerializeString(Handle<String> object) {
-  object = String::Flatten(object);
-  if (builder_.CurrentEncoding() == String::ONE_BYTE_ENCODING) {
-    if (object->IsOneByteRepresentationUnderneath()) {
-      SerializeString_<uint8_t, uint8_t>(object);
-    } else {
-      builder_.ChangeEncoding();
-      SerializeString(object);
-    }
-  } else {
-    if (object->IsOneByteRepresentationUnderneath()) {
-      SerializeString_<uint8_t, uc16>(object);
-    } else {
-      SerializeString_<uc16, uc16>(object);
-    }
-  }
-}
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/keys.cc b/src/keys.cc
index 17270eb..75eb162 100644
--- a/src/keys.cc
+++ b/src/keys.cc
@@ -17,9 +17,6 @@
 namespace internal {
 
 KeyAccumulator::~KeyAccumulator() {
-  for (size_t i = 0; i < elements_.size(); i++) {
-    delete elements_[i];
-  }
 }
 
 namespace {
@@ -34,197 +31,58 @@
 }
 
 }  // namespace
-
 MaybeHandle<FixedArray> KeyAccumulator::GetKeys(
-    Handle<JSReceiver> object, KeyCollectionType type, PropertyFilter filter,
-    GetKeysConversion keys_conversion, bool filter_proxy_keys) {
-  USE(ContainsOnlyValidKeys);
+    Handle<JSReceiver> object, KeyCollectionMode mode, PropertyFilter filter,
+    GetKeysConversion keys_conversion, bool filter_proxy_keys, bool is_for_in) {
   Isolate* isolate = object->GetIsolate();
-  KeyAccumulator accumulator(isolate, type, filter);
+  KeyAccumulator accumulator(isolate, mode, filter);
   accumulator.set_filter_proxy_keys(filter_proxy_keys);
+  accumulator.set_is_for_in(is_for_in);
   MAYBE_RETURN(accumulator.CollectKeys(object, object),
                MaybeHandle<FixedArray>());
-  Handle<FixedArray> keys = accumulator.GetKeys(keys_conversion);
-  DCHECK(ContainsOnlyValidKeys(keys));
-  return keys;
+  return accumulator.GetKeys(keys_conversion);
 }
 
 Handle<FixedArray> KeyAccumulator::GetKeys(GetKeysConversion convert) {
-  if (length_ == 0) {
+  if (keys_.is_null()) {
     return isolate_->factory()->empty_fixed_array();
   }
-  // Make sure we have all the lengths collected.
-  NextPrototype();
-
-  if (type_ == OWN_ONLY && !ownProxyKeys_.is_null()) {
-    return ownProxyKeys_;
+  if (mode_ == KeyCollectionMode::kOwnOnly &&
+      keys_->map() == isolate_->heap()->fixed_array_map()) {
+    return Handle<FixedArray>::cast(keys_);
   }
-  // Assemble the result array by first adding the element keys and then the
-  // property keys. We use the total number of String + Symbol keys per level in
-  // |level_lengths_| and the available element keys in the corresponding bucket
-  // in |elements_| to deduce the number of keys to take from the
-  // |string_properties_| and |symbol_properties_| set.
-  Handle<FixedArray> result = isolate_->factory()->NewFixedArray(length_);
-  int insertion_index = 0;
-  int string_properties_index = 0;
-  int symbol_properties_index = 0;
-  // String and Symbol lengths always come in pairs:
-  size_t max_level = level_lengths_.size() / 2;
-  for (size_t level = 0; level < max_level; level++) {
-    int num_string_properties = level_lengths_[level * 2];
-    int num_symbol_properties = level_lengths_[level * 2 + 1];
-    int num_elements = 0;
-    if (num_string_properties < 0) {
-      // If the |num_string_properties| is negative, the current level contains
-      // properties from a proxy, hence we skip the integer keys in |elements_|
-      // since proxies define the complete ordering.
-      num_string_properties = -num_string_properties;
-    } else if (level < elements_.size()) {
-      // Add the element indices for this prototype level.
-      std::vector<uint32_t>* elements = elements_[level];
-      num_elements = static_cast<int>(elements->size());
-      for (int i = 0; i < num_elements; i++) {
-        Handle<Object> key;
-        if (convert == KEEP_NUMBERS) {
-          key = isolate_->factory()->NewNumberFromUint(elements->at(i));
-        } else {
-          key = isolate_->factory()->Uint32ToString(elements->at(i));
-        }
-        result->set(insertion_index, *key);
-        insertion_index++;
-      }
-    }
-    // Add the string property keys for this prototype level.
-    for (int i = 0; i < num_string_properties; i++) {
-      Object* key = string_properties_->KeyAt(string_properties_index);
-      result->set(insertion_index, key);
-      insertion_index++;
-      string_properties_index++;
-    }
-    // Add the symbol property keys for this prototype level.
-    for (int i = 0; i < num_symbol_properties; i++) {
-      Object* key = symbol_properties_->KeyAt(symbol_properties_index);
-      result->set(insertion_index, key);
-      insertion_index++;
-      symbol_properties_index++;
-    }
-    if (FLAG_trace_for_in_enumerate) {
-      PrintF("| strings=%d symbols=%d elements=%i ", num_string_properties,
-             num_symbol_properties, num_elements);
-    }
-  }
-  if (FLAG_trace_for_in_enumerate) {
-    PrintF("|| prototypes=%zu ||\n", max_level);
-  }
-
-  DCHECK_EQ(insertion_index, length_);
+  USE(ContainsOnlyValidKeys);
+  Handle<FixedArray> result =
+      OrderedHashSet::ConvertToKeysArray(keys(), convert);
+  DCHECK(ContainsOnlyValidKeys(result));
   return result;
 }
 
-namespace {
-
-bool AccumulatorHasKey(std::vector<uint32_t>* sub_elements, uint32_t key) {
-  return std::binary_search(sub_elements->begin(), sub_elements->end(), key);
+void KeyAccumulator::AddKey(Object* key, AddKeyConversion convert) {
+  AddKey(handle(key, isolate_), convert);
 }
 
-}  // namespace
-
-bool KeyAccumulator::AddKey(Object* key, AddKeyConversion convert) {
-  return AddKey(handle(key, isolate_), convert);
-}
-
-bool KeyAccumulator::AddKey(Handle<Object> key, AddKeyConversion convert) {
+void KeyAccumulator::AddKey(Handle<Object> key, AddKeyConversion convert) {
   if (key->IsSymbol()) {
-    if (filter_ & SKIP_SYMBOLS) return false;
-    if (Handle<Symbol>::cast(key)->is_private()) return false;
-    return AddSymbolKey(key);
+    if (filter_ & SKIP_SYMBOLS) return;
+    if (Handle<Symbol>::cast(key)->is_private()) return;
+  } else if (filter_ & SKIP_STRINGS) {
+    return;
   }
-  if (filter_ & SKIP_STRINGS) return false;
-  // Make sure we do not add keys to a proxy-level (see AddKeysFromJSProxy).
-  DCHECK_LE(0, level_string_length_);
-  // In some cases (e.g. proxies) we might get in String-converted ints which
-  // should be added to the elements list instead of the properties. For
-  // proxies we have to convert as well but also respect the original order.
-  // Therefore we add a converted key to both sides
-  if (convert == CONVERT_TO_ARRAY_INDEX || convert == PROXY_MAGIC) {
-    uint32_t index = 0;
-    int prev_length = length_;
-    int prev_proto = level_string_length_;
-    if ((key->IsString() && Handle<String>::cast(key)->AsArrayIndex(&index)) ||
-        key->ToArrayIndex(&index)) {
-      bool key_was_added = AddIntegerKey(index);
-      if (convert == CONVERT_TO_ARRAY_INDEX) return key_was_added;
-      if (convert == PROXY_MAGIC) {
-        // If we had an array index (number) and it wasn't added, the key
-        // already existed before, hence we cannot add it to the properties
-        // keys as it would lead to duplicate entries.
-        if (!key_was_added) {
-          return false;
-        }
-        length_ = prev_length;
-        level_string_length_ = prev_proto;
-      }
-    }
+  if (keys_.is_null()) {
+    keys_ = OrderedHashSet::Allocate(isolate_, 16);
   }
-  return AddStringKey(key, convert);
-}
-
-bool KeyAccumulator::AddKey(uint32_t key) { return AddIntegerKey(key); }
-
-bool KeyAccumulator::AddIntegerKey(uint32_t key) {
-  // Make sure we do not add keys to a proxy-level (see AddKeysFromJSProxy).
-  // We mark proxy-levels with a negative length
-  DCHECK_LE(0, level_string_length_);
-  // Binary search over all but the last level. The last one might not be
-  // sorted yet.
-  for (size_t i = 1; i < elements_.size(); i++) {
-    if (AccumulatorHasKey(elements_[i - 1], key)) return false;
+  uint32_t index;
+  if (convert == CONVERT_TO_ARRAY_INDEX && key->IsString() &&
+      Handle<String>::cast(key)->AsArrayIndex(&index)) {
+    key = isolate_->factory()->NewNumberFromUint(index);
   }
-  elements_.back()->push_back(key);
-  length_++;
-  return true;
-}
-
-bool KeyAccumulator::AddStringKey(Handle<Object> key,
-                                  AddKeyConversion convert) {
-  if (string_properties_.is_null()) {
-    string_properties_ = OrderedHashSet::Allocate(isolate_, 16);
-  }
-  // TODO(cbruni): remove this conversion once we throw the correct TypeError
-  // for non-string/symbol elements returned by proxies
-  if (convert == PROXY_MAGIC && key->IsNumber()) {
-    key = isolate_->factory()->NumberToString(key);
-  }
-  int prev_size = string_properties_->NumberOfElements();
-  string_properties_ = OrderedHashSet::Add(string_properties_, key);
-  if (prev_size < string_properties_->NumberOfElements()) {
-    length_++;
-    level_string_length_++;
-    return true;
-  } else {
-    return false;
-  }
-}
-
-bool KeyAccumulator::AddSymbolKey(Handle<Object> key) {
-  if (symbol_properties_.is_null()) {
-    symbol_properties_ = OrderedHashSet::Allocate(isolate_, 16);
-  }
-  int prev_size = symbol_properties_->NumberOfElements();
-  symbol_properties_ = OrderedHashSet::Add(symbol_properties_, key);
-  if (prev_size < symbol_properties_->NumberOfElements()) {
-    length_++;
-    level_symbol_length_++;
-    return true;
-  } else {
-    return false;
-  }
+  keys_ = OrderedHashSet::Add(keys(), key);
 }
 
 void KeyAccumulator::AddKeys(Handle<FixedArray> array,
                              AddKeyConversion convert) {
   int add_length = array->length();
-  if (add_length == 0) return;
   for (int i = 0; i < add_length; i++) {
     Handle<Object> current(array->get(i), isolate_);
     AddKey(current, convert);
@@ -271,81 +129,35 @@
 Maybe<bool> KeyAccumulator::AddKeysFromJSProxy(Handle<JSProxy> proxy,
                                                Handle<FixedArray> keys) {
   if (filter_proxy_keys_) {
+    DCHECK(!is_for_in_);
     ASSIGN_RETURN_ON_EXCEPTION_VALUE(
         isolate_, keys, FilterProxyKeys(isolate_, proxy, keys, filter_),
         Nothing<bool>());
   }
-  // Proxies define a complete list of keys with no distinction of
-  // elements and properties, which breaks the normal assumption for the
-  // KeyAccumulator.
-  if (type_ == OWN_ONLY) {
-    ownProxyKeys_ = keys;
-    level_string_length_ = keys->length();
-    length_ = level_string_length_;
-  } else {
-    AddKeys(keys, PROXY_MAGIC);
+  if (mode_ == KeyCollectionMode::kOwnOnly && !is_for_in_) {
+    // If we collect only the keys from a JSProxy do not sort or deduplicate it.
+    keys_ = keys;
+    return Just(true);
   }
-  // Invert the current length to indicate a present proxy, so we can ignore
-  // element keys for this level. Otherwise we would not fully respect the order
-  // given by the proxy.
-  level_string_length_ = -level_string_length_;
+  AddKeys(keys, is_for_in_ ? CONVERT_TO_ARRAY_INDEX : DO_NOT_CONVERT);
   return Just(true);
 }
 
-void KeyAccumulator::AddElementKeysFromInterceptor(
-    Handle<JSObject> array_like) {
-  AddKeys(array_like, CONVERT_TO_ARRAY_INDEX);
-  // The interceptor might introduce duplicates for the current level, since
-  // these keys get added after the objects's normal element keys.
-  SortCurrentElementsListRemoveDuplicates();
-}
-
-void KeyAccumulator::SortCurrentElementsListRemoveDuplicates() {
-  // Sort and remove duplicates from the current elements level and adjust.
-  // the lengths accordingly.
-  auto last_level = elements_.back();
-  size_t nof_removed_keys = last_level->size();
-  std::sort(last_level->begin(), last_level->end());
-  last_level->erase(std::unique(last_level->begin(), last_level->end()),
-                    last_level->end());
-  // Adjust total length by the number of removed duplicates.
-  nof_removed_keys -= last_level->size();
-  length_ -= static_cast<int>(nof_removed_keys);
-}
-
-void KeyAccumulator::SortCurrentElementsList() {
-  if (elements_.empty()) return;
-  auto element_keys = elements_.back();
-  std::sort(element_keys->begin(), element_keys->end());
-}
-
-void KeyAccumulator::NextPrototype() {
-  // Store the protoLength on the first call of this method.
-  if (!elements_.empty()) {
-    level_lengths_.push_back(level_string_length_);
-    level_lengths_.push_back(level_symbol_length_);
-  }
-  elements_.push_back(new std::vector<uint32_t>());
-  level_string_length_ = 0;
-  level_symbol_length_ = 0;
-}
-
 Maybe<bool> KeyAccumulator::CollectKeys(Handle<JSReceiver> receiver,
                                         Handle<JSReceiver> object) {
   // Proxies have no hidden prototype and we should not trigger the
   // [[GetPrototypeOf]] trap on the last iteration when using
   // AdvanceFollowingProxies.
-  if (type_ == OWN_ONLY && object->IsJSProxy()) {
+  if (mode_ == KeyCollectionMode::kOwnOnly && object->IsJSProxy()) {
     MAYBE_RETURN(CollectOwnJSProxyKeys(receiver, Handle<JSProxy>::cast(object)),
                  Nothing<bool>());
     return Just(true);
   }
 
-  PrototypeIterator::WhereToEnd end = type_ == OWN_ONLY
+  PrototypeIterator::WhereToEnd end = mode_ == KeyCollectionMode::kOwnOnly
                                           ? PrototypeIterator::END_AT_NON_HIDDEN
                                           : PrototypeIterator::END_AT_NULL;
-  for (PrototypeIterator iter(isolate_, object,
-                              PrototypeIterator::START_AT_RECEIVER, end);
+  for (PrototypeIterator iter(isolate_, object, kStartAtReceiver, end);
        !iter.IsAtEnd();) {
     Handle<JSReceiver> current =
         PrototypeIterator::GetCurrent<JSReceiver>(iter);
@@ -363,6 +175,10 @@
     if (!iter.AdvanceFollowingProxiesIgnoringAccessChecks()) {
       return Nothing<bool>();
     }
+    if (!last_non_empty_prototype_.is_null() &&
+        *last_non_empty_prototype_ == *current) {
+      break;
+    }
   }
   return Just(true);
 }
@@ -396,25 +212,26 @@
 void FastKeyAccumulator::Prepare() {
   DisallowHeapAllocation no_gc;
   // Directly go for the fast path for OWN_ONLY keys.
-  if (type_ == OWN_ONLY) return;
+  if (mode_ == KeyCollectionMode::kOwnOnly) return;
   // Fully walk the prototype chain and find the last prototype with keys.
   is_receiver_simple_enum_ = false;
   has_empty_prototype_ = true;
-  JSReceiver* first_non_empty_prototype;
+  JSReceiver* last_prototype = nullptr;
   for (PrototypeIterator iter(isolate_, *receiver_); !iter.IsAtEnd();
        iter.Advance()) {
     JSReceiver* current = iter.GetCurrent<JSReceiver>();
-    if (CheckAndInitalizeSimpleEnumCache(current)) continue;
+    bool has_no_properties = CheckAndInitalizeSimpleEnumCache(current);
+    if (has_no_properties) continue;
+    last_prototype = current;
     has_empty_prototype_ = false;
-    first_non_empty_prototype = current;
-    // TODO(cbruni): use the first non-empty prototype.
-    USE(first_non_empty_prototype);
-    return;
   }
-  DCHECK(has_empty_prototype_);
-  is_receiver_simple_enum_ =
-      receiver_->map()->EnumLength() != kInvalidEnumCacheSentinel &&
-      !JSObject::cast(*receiver_)->HasEnumerableElements();
+  if (has_empty_prototype_) {
+    is_receiver_simple_enum_ =
+        receiver_->map()->EnumLength() != kInvalidEnumCacheSentinel &&
+        !JSObject::cast(*receiver_)->HasEnumerableElements();
+  } else if (last_prototype != nullptr) {
+    last_non_empty_prototype_ = handle(last_prototype, isolate_);
+  }
 }
 
 namespace {
@@ -549,17 +366,19 @@
 
 }  // namespace
 
-MaybeHandle<FixedArray> FastKeyAccumulator::GetKeys(GetKeysConversion convert) {
+MaybeHandle<FixedArray> FastKeyAccumulator::GetKeys(
+    GetKeysConversion keys_conversion) {
   Handle<FixedArray> keys;
-  if (GetKeysFast(convert).ToHandle(&keys)) {
+  if (filter_ == ENUMERABLE_STRINGS &&
+      GetKeysFast(keys_conversion).ToHandle(&keys)) {
     return keys;
   }
-  return GetKeysSlow(convert);
+  return GetKeysSlow(keys_conversion);
 }
 
 MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysFast(
-    GetKeysConversion convert) {
-  bool own_only = has_empty_prototype_ || type_ == OWN_ONLY;
+    GetKeysConversion keys_conversion) {
+  bool own_only = has_empty_prototype_ || mode_ == KeyCollectionMode::kOwnOnly;
   Map* map = receiver_->map();
   if (!own_only || !OnlyHasSimpleProperties(map)) {
     return MaybeHandle<FixedArray>();
@@ -571,7 +390,7 @@
 
   // Do not try to use the enum-cache for dict-mode objects.
   if (map->is_dictionary_map()) {
-    return GetOwnKeysWithElements<false>(isolate_, object, convert);
+    return GetOwnKeysWithElements<false>(isolate_, object, keys_conversion);
   }
   int enum_length = receiver_->map()->EnumLength();
   if (enum_length == kInvalidEnumCacheSentinel) {
@@ -590,22 +409,53 @@
   }
   // The properties-only case failed because there were probably elements on the
   // receiver.
-  return GetOwnKeysWithElements<true>(isolate_, object, convert);
+  return GetOwnKeysWithElements<true>(isolate_, object, keys_conversion);
 }
 
 MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysSlow(
-    GetKeysConversion convert) {
-  return JSReceiver::GetKeys(receiver_, type_, filter_, KEEP_NUMBERS,
-                             filter_proxy_keys_);
+    GetKeysConversion keys_conversion) {
+  KeyAccumulator accumulator(isolate_, mode_, filter_);
+  accumulator.set_filter_proxy_keys(filter_proxy_keys_);
+  accumulator.set_is_for_in(is_for_in_);
+  accumulator.set_last_non_empty_prototype(last_non_empty_prototype_);
+
+  MAYBE_RETURN(accumulator.CollectKeys(receiver_, receiver_),
+               MaybeHandle<FixedArray>());
+  return accumulator.GetKeys(keys_conversion);
 }
 
+namespace {
+
 enum IndexedOrNamed { kIndexed, kNamed };
 
 // Returns |true| on success, |nothing| on exception.
 template <class Callback, IndexedOrNamed type>
-static Maybe<bool> GetKeysFromInterceptor(Handle<JSReceiver> receiver,
-                                          Handle<JSObject> object,
-                                          KeyAccumulator* accumulator) {
+Maybe<bool> CollectInterceptorKeysInternal(Handle<JSReceiver> receiver,
+                                           Handle<JSObject> object,
+                                           Handle<InterceptorInfo> interceptor,
+                                           KeyAccumulator* accumulator) {
+  Isolate* isolate = accumulator->isolate();
+  PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+                                 *object, Object::DONT_THROW);
+  Handle<JSObject> result;
+  if (!interceptor->enumerator()->IsUndefined(isolate)) {
+    Callback enum_fun = v8::ToCData<Callback>(interceptor->enumerator());
+    const char* log_tag = type == kIndexed ? "interceptor-indexed-enum"
+                                           : "interceptor-named-enum";
+    LOG(isolate, ApiObjectAccess(log_tag, *object));
+    result = args.Call(enum_fun);
+  }
+  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+  if (result.is_null()) return Just(true);
+  accumulator->AddKeys(
+      result, type == kIndexed ? CONVERT_TO_ARRAY_INDEX : DO_NOT_CONVERT);
+  return Just(true);
+}
+
+template <class Callback, IndexedOrNamed type>
+Maybe<bool> CollectInterceptorKeys(Handle<JSReceiver> receiver,
+                                   Handle<JSObject> object,
+                                   KeyAccumulator* accumulator) {
   Isolate* isolate = accumulator->isolate();
   if (type == kIndexed) {
     if (!object->HasIndexedInterceptor()) return Just(true);
@@ -620,99 +470,140 @@
       !interceptor->all_can_read()) {
     return Just(true);
   }
-  PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
-                                 *object, Object::DONT_THROW);
-  Handle<JSObject> result;
-  if (!interceptor->enumerator()->IsUndefined()) {
-    Callback enum_fun = v8::ToCData<Callback>(interceptor->enumerator());
-    const char* log_tag = type == kIndexed ? "interceptor-indexed-enum"
-                                           : "interceptor-named-enum";
-    LOG(isolate, ApiObjectAccess(log_tag, *object));
-    result = args.Call(enum_fun);
-  }
-  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
-  if (result.is_null()) return Just(true);
-  DCHECK(result->IsJSArray() || result->HasSloppyArgumentsElements());
-  // The accumulator takes care of string/symbol filtering.
-  if (type == kIndexed) {
-    accumulator->AddElementKeysFromInterceptor(result);
-  } else {
-    accumulator->AddKeys(result, DO_NOT_CONVERT);
-  }
-  return Just(true);
+  return CollectInterceptorKeysInternal<Callback, type>(
+      receiver, object, interceptor, accumulator);
 }
 
-void KeyAccumulator::CollectOwnElementIndices(Handle<JSObject> object) {
-  if (filter_ & SKIP_STRINGS) return;
+}  // namespace
+
+Maybe<bool> KeyAccumulator::CollectOwnElementIndices(
+    Handle<JSReceiver> receiver, Handle<JSObject> object) {
+  if (filter_ & SKIP_STRINGS || skip_indices_) return Just(true);
+
   ElementsAccessor* accessor = object->GetElementsAccessor();
   accessor->CollectElementIndices(object, this);
+
+  return CollectInterceptorKeys<v8::IndexedPropertyEnumeratorCallback,
+                                kIndexed>(receiver, object, this);
 }
 
-void KeyAccumulator::CollectOwnPropertyNames(Handle<JSObject> object) {
-  if (object->HasFastProperties()) {
-    int real_size = object->map()->NumberOfOwnDescriptors();
-    Handle<DescriptorArray> descs(object->map()->instance_descriptors(),
-                                  isolate_);
-    for (int i = 0; i < real_size; i++) {
-      PropertyDetails details = descs->GetDetails(i);
-      if ((details.attributes() & filter_) != 0) continue;
-      if (filter_ & ONLY_ALL_CAN_READ) {
-        if (details.kind() != kAccessor) continue;
-        Object* accessors = descs->GetValue(i);
-        if (!accessors->IsAccessorInfo()) continue;
-        if (!AccessorInfo::cast(accessors)->all_can_read()) continue;
-      }
-      Name* key = descs->GetKey(i);
-      if (key->FilterKey(filter_)) continue;
-      AddKey(key, DO_NOT_CONVERT);
+namespace {
+
+template <bool skip_symbols>
+int CollectOwnPropertyNamesInternal(Handle<JSObject> object,
+                                    KeyAccumulator* keys,
+                                    Handle<DescriptorArray> descs,
+                                    int start_index, int limit) {
+  int first_skipped = -1;
+  for (int i = start_index; i < limit; i++) {
+    PropertyDetails details = descs->GetDetails(i);
+    if ((details.attributes() & keys->filter()) != 0) continue;
+    if (keys->filter() & ONLY_ALL_CAN_READ) {
+      if (details.kind() != kAccessor) continue;
+      Object* accessors = descs->GetValue(i);
+      if (!accessors->IsAccessorInfo()) continue;
+      if (!AccessorInfo::cast(accessors)->all_can_read()) continue;
     }
-  } else if (object->IsJSGlobalObject()) {
-    GlobalDictionary::CollectKeysTo(
-        handle(object->global_dictionary(), isolate_), this, filter_);
-  } else {
-    NameDictionary::CollectKeysTo(
-        handle(object->property_dictionary(), isolate_), this, filter_);
+    Name* key = descs->GetKey(i);
+    if (skip_symbols == key->IsSymbol()) {
+      if (first_skipped == -1) first_skipped = i;
+      continue;
+    }
+    if (key->FilterKey(keys->filter())) continue;
+    keys->AddKey(key, DO_NOT_CONVERT);
   }
+  return first_skipped;
+}
+
+}  // namespace
+
+Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
+                                                    Handle<JSObject> object) {
+  if (filter_ == ENUMERABLE_STRINGS) {
+    Handle<FixedArray> enum_keys =
+        KeyAccumulator::GetEnumPropertyKeys(isolate_, object);
+    AddKeys(enum_keys, DO_NOT_CONVERT);
+  } else {
+    if (object->HasFastProperties()) {
+      int limit = object->map()->NumberOfOwnDescriptors();
+      Handle<DescriptorArray> descs(object->map()->instance_descriptors(),
+                                    isolate_);
+      // First collect the strings,
+      int first_symbol =
+          CollectOwnPropertyNamesInternal<true>(object, this, descs, 0, limit);
+      // then the symbols.
+      if (first_symbol != -1) {
+        CollectOwnPropertyNamesInternal<false>(object, this, descs,
+                                               first_symbol, limit);
+      }
+    } else if (object->IsJSGlobalObject()) {
+      GlobalDictionary::CollectKeysTo(
+          handle(object->global_dictionary(), isolate_), this, filter_);
+    } else {
+      NameDictionary::CollectKeysTo(
+          handle(object->property_dictionary(), isolate_), this, filter_);
+    }
+  }
+  // Add the property keys from the interceptor.
+  return CollectInterceptorKeys<v8::GenericNamedPropertyEnumeratorCallback,
+                                kNamed>(receiver, object, this);
+}
+
+Maybe<bool> KeyAccumulator::CollectAccessCheckInterceptorKeys(
+    Handle<AccessCheckInfo> access_check_info, Handle<JSReceiver> receiver,
+    Handle<JSObject> object) {
+  MAYBE_RETURN(
+      (CollectInterceptorKeysInternal<v8::IndexedPropertyEnumeratorCallback,
+                                      kIndexed>(
+          receiver, object,
+          handle(
+              InterceptorInfo::cast(access_check_info->indexed_interceptor()),
+              isolate_),
+          this)),
+      Nothing<bool>());
+  MAYBE_RETURN(
+      (CollectInterceptorKeysInternal<
+          v8::GenericNamedPropertyEnumeratorCallback, kNamed>(
+          receiver, object,
+          handle(InterceptorInfo::cast(access_check_info->named_interceptor()),
+                 isolate_),
+          this)),
+      Nothing<bool>());
+  return Just(true);
 }
 
 // Returns |true| on success, |false| if prototype walking should be stopped,
 // |nothing| if an exception was thrown.
 Maybe<bool> KeyAccumulator::CollectOwnKeys(Handle<JSReceiver> receiver,
                                            Handle<JSObject> object) {
-  NextPrototype();
   // Check access rights if required.
   if (object->IsAccessCheckNeeded() &&
       !isolate_->MayAccess(handle(isolate_->context()), object)) {
     // The cross-origin spec says that [[Enumerate]] shall return an empty
     // iterator when it doesn't have access...
-    if (type_ == INCLUDE_PROTOS) {
+    if (mode_ == KeyCollectionMode::kIncludePrototypes) {
       return Just(false);
     }
     // ...whereas [[OwnPropertyKeys]] shall return whitelisted properties.
-    DCHECK_EQ(OWN_ONLY, type_);
+    DCHECK(KeyCollectionMode::kOwnOnly == mode_);
+    Handle<AccessCheckInfo> access_check_info;
+    {
+      DisallowHeapAllocation no_gc;
+      AccessCheckInfo* maybe_info = AccessCheckInfo::Get(isolate_, object);
+      if (maybe_info) access_check_info = handle(maybe_info, isolate_);
+    }
+    // We always have both kinds of interceptors or none.
+    if (!access_check_info.is_null() &&
+        access_check_info->named_interceptor()) {
+      MAYBE_RETURN(CollectAccessCheckInterceptorKeys(access_check_info,
+                                                     receiver, object),
+                   Nothing<bool>());
+      return Just(false);
+    }
     filter_ = static_cast<PropertyFilter>(filter_ | ONLY_ALL_CAN_READ);
   }
-
-  CollectOwnElementIndices(object);
-
-  // Add the element keys from the interceptor.
-  Maybe<bool> success =
-      GetKeysFromInterceptor<v8::IndexedPropertyEnumeratorCallback, kIndexed>(
-          receiver, object, this);
-  MAYBE_RETURN(success, Nothing<bool>());
-
-  if (filter_ == ENUMERABLE_STRINGS) {
-    Handle<FixedArray> enum_keys =
-        KeyAccumulator::GetEnumPropertyKeys(isolate_, object);
-    AddKeys(enum_keys, DO_NOT_CONVERT);
-  } else {
-    CollectOwnPropertyNames(object);
-  }
-
-  // Add the property keys from the interceptor.
-  success = GetKeysFromInterceptor<v8::GenericNamedPropertyEnumeratorCallback,
-                                   kNamed>(receiver, object, this);
-  MAYBE_RETURN(success, Nothing<bool>());
+  MAYBE_RETURN(CollectOwnElementIndices(receiver, object), Nothing<bool>());
+  MAYBE_RETURN(CollectOwnPropertyNames(receiver, object), Nothing<bool>());
   return Just(true);
 }
 
@@ -765,7 +656,7 @@
                                         isolate_->factory()->ownKeys_string()),
       Nothing<bool>());
   // 6. If trap is undefined, then
-  if (trap->IsUndefined()) {
+  if (trap->IsUndefined(isolate_)) {
     // 6a. Return target.[[OwnPropertyKeys]]().
     return CollectOwnJSProxyTargetKeys(proxy, target);
   }
@@ -822,7 +713,6 @@
       // (No-op, just keep it in |target_keys|.)
     }
   }
-  NextPrototype();  // Prepare for accumulating keys.
   // 15. If extensibleTarget is true and targetNonconfigurableKeys is empty,
   //     then:
   if (extensible_target && nonconfigurable_keys_length == 0) {
@@ -895,8 +785,11 @@
   // TODO(cbruni): avoid creating another KeyAccumulator
   Handle<FixedArray> keys;
   ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-      isolate_, keys, JSReceiver::OwnPropertyKeys(target), Nothing<bool>());
-  NextPrototype();  // Prepare for accumulating keys.
+      isolate_, keys,
+      KeyAccumulator::GetKeys(target, KeyCollectionMode::kOwnOnly, filter_,
+                              GetKeysConversion::kConvertToString,
+                              filter_proxy_keys_, is_for_in_),
+      Nothing<bool>());
   bool prev_filter_proxy_keys_ = filter_proxy_keys_;
   filter_proxy_keys_ = false;
   Maybe<bool> result = AddKeysFromJSProxy(proxy, keys);
diff --git a/src/keys.h b/src/keys.h
index c73f109..502c834 100644
--- a/src/keys.h
+++ b/src/keys.h
@@ -11,7 +11,7 @@
 namespace v8 {
 namespace internal {
 
-enum AddKeyConversion { DO_NOT_CONVERT, CONVERT_TO_ARRAY_INDEX, PROXY_MAGIC };
+enum AddKeyConversion { DO_NOT_CONVERT, CONVERT_TO_ARRAY_INDEX };
 
 // This is a helper class for JSReceiver::GetKeys which collects and sorts keys.
 // GetKeys needs to sort keys per prototype level, first showing the integer
@@ -31,41 +31,46 @@
 // are more compact and allow for reasonably fast includes check.
 class KeyAccumulator final BASE_EMBEDDED {
  public:
-  KeyAccumulator(Isolate* isolate, KeyCollectionType type,
+  KeyAccumulator(Isolate* isolate, KeyCollectionMode mode,
                  PropertyFilter filter)
-      : isolate_(isolate), type_(type), filter_(filter) {}
+      : isolate_(isolate), mode_(mode), filter_(filter) {}
   ~KeyAccumulator();
 
-  static MaybeHandle<FixedArray> GetKeys(Handle<JSReceiver> object,
-                                         KeyCollectionType type,
-                                         PropertyFilter filter,
-                                         GetKeysConversion keys_conversion,
-                                         bool filter_proxy_keys);
-  Handle<FixedArray> GetKeys(GetKeysConversion convert = KEEP_NUMBERS);
+  static MaybeHandle<FixedArray> GetKeys(
+      Handle<JSReceiver> object, KeyCollectionMode mode, PropertyFilter filter,
+      GetKeysConversion keys_conversion = GetKeysConversion::kKeepNumbers,
+      bool filter_proxy_keys = true, bool is_for_in = false);
+
+  Handle<FixedArray> GetKeys(
+      GetKeysConversion convert = GetKeysConversion::kKeepNumbers);
   Maybe<bool> CollectKeys(Handle<JSReceiver> receiver,
                           Handle<JSReceiver> object);
-  void CollectOwnElementIndices(Handle<JSObject> object);
-  void CollectOwnPropertyNames(Handle<JSObject> object);
+  Maybe<bool> CollectOwnElementIndices(Handle<JSReceiver> receiver,
+                                       Handle<JSObject> object);
+  Maybe<bool> CollectOwnPropertyNames(Handle<JSReceiver> receiver,
+                                      Handle<JSObject> object);
+  Maybe<bool> CollectAccessCheckInterceptorKeys(
+      Handle<AccessCheckInfo> access_check_info, Handle<JSReceiver> receiver,
+      Handle<JSObject> object);
 
   static Handle<FixedArray> GetEnumPropertyKeys(Isolate* isolate,
                                                 Handle<JSObject> object);
 
-  bool AddKey(uint32_t key);
-  bool AddKey(Object* key, AddKeyConversion convert);
-  bool AddKey(Handle<Object> key, AddKeyConversion convert);
+  void AddKey(Object* key, AddKeyConversion convert = DO_NOT_CONVERT);
+  void AddKey(Handle<Object> key, AddKeyConversion convert = DO_NOT_CONVERT);
   void AddKeys(Handle<FixedArray> array, AddKeyConversion convert);
-  void AddKeys(Handle<JSObject> array, AddKeyConversion convert);
-  void AddElementKeysFromInterceptor(Handle<JSObject> array);
+  void AddKeys(Handle<JSObject> array_like, AddKeyConversion convert);
 
   // Jump to the next level, pushing the current |levelLength_| to
   // |levelLengths_| and adding a new list to |elements_|.
-  void NextPrototype();
-  // Sort the integer indices in the last list in |elements_|
-  void SortCurrentElementsList();
-  int length() { return length_; }
   Isolate* isolate() { return isolate_; }
   PropertyFilter filter() { return filter_; }
   void set_filter_proxy_keys(bool filter) { filter_proxy_keys_ = filter; }
+  void set_is_for_in(bool value) { is_for_in_ = value; }
+  void set_skip_indices(bool value) { skip_indices_ = value; }
+  void set_last_non_empty_prototype(Handle<JSReceiver> object) {
+    last_non_empty_prototype_ = object;
+  }
 
  private:
   Maybe<bool> CollectOwnKeys(Handle<JSReceiver> receiver,
@@ -78,35 +83,18 @@
   Maybe<bool> AddKeysFromJSProxy(Handle<JSProxy> proxy,
                                  Handle<FixedArray> keys);
 
-  bool AddIntegerKey(uint32_t key);
-  bool AddStringKey(Handle<Object> key, AddKeyConversion convert);
-  bool AddSymbolKey(Handle<Object> array);
-  void SortCurrentElementsListRemoveDuplicates();
+  Handle<OrderedHashSet> keys() { return Handle<OrderedHashSet>::cast(keys_); }
 
   Isolate* isolate_;
-  KeyCollectionType type_;
+  // keys_ is either an Handle<OrderedHashSet> or in the case of own JSProxy
+  // keys a Handle<FixedArray>.
+  Handle<FixedArray> keys_;
+  Handle<JSReceiver> last_non_empty_prototype_;
+  KeyCollectionMode mode_;
   PropertyFilter filter_;
   bool filter_proxy_keys_ = true;
-  // |elements_| contains the sorted element keys (indices) per level.
-  std::vector<std::vector<uint32_t>*> elements_;
-  // |protoLengths_| contains the total number of keys (elements + properties)
-  // per level. Negative values mark counts for a level with keys from a proxy.
-  std::vector<int> level_lengths_;
-  // |string_properties_| contains the unique String property keys for all
-  // levels in insertion order per level.
-  Handle<OrderedHashSet> string_properties_;
-  // |symbol_properties_| contains the unique Symbol property keys for all
-  // levels in insertion order per level.
-  Handle<OrderedHashSet> symbol_properties_;
-  Handle<FixedArray> ownProxyKeys_;
-  // |length_| keeps track of the total number of all element and property keys.
-  int length_ = 0;
-  // |levelLength_| keeps track of the number of String keys in the current
-  // level.
-  int level_string_length_ = 0;
-  // |levelSymbolLength_| keeps track of the number of Symbol keys in the
-  // current level.
-  int level_symbol_length_ = 0;
+  bool is_for_in_ = false;
+  bool skip_indices_ = false;
 
   DISALLOW_COPY_AND_ASSIGN(KeyAccumulator);
 };
@@ -117,16 +105,18 @@
 class FastKeyAccumulator {
  public:
   FastKeyAccumulator(Isolate* isolate, Handle<JSReceiver> receiver,
-                     KeyCollectionType type, PropertyFilter filter)
-      : isolate_(isolate), receiver_(receiver), type_(type), filter_(filter) {
+                     KeyCollectionMode mode, PropertyFilter filter)
+      : isolate_(isolate), receiver_(receiver), mode_(mode), filter_(filter) {
     Prepare();
   }
 
   bool is_receiver_simple_enum() { return is_receiver_simple_enum_; }
   bool has_empty_prototype() { return has_empty_prototype_; }
   void set_filter_proxy_keys(bool filter) { filter_proxy_keys_ = filter; }
+  void set_is_for_in(bool value) { is_for_in_ = value; }
 
-  MaybeHandle<FixedArray> GetKeys(GetKeysConversion convert = KEEP_NUMBERS);
+  MaybeHandle<FixedArray> GetKeys(
+      GetKeysConversion convert = GetKeysConversion::kKeepNumbers);
 
  private:
   void Prepare();
@@ -135,9 +125,11 @@
 
   Isolate* isolate_;
   Handle<JSReceiver> receiver_;
-  KeyCollectionType type_;
+  Handle<JSReceiver> last_non_empty_prototype_;
+  KeyCollectionMode mode_;
   PropertyFilter filter_;
   bool filter_proxy_keys_ = true;
+  bool is_for_in_ = false;
   bool is_receiver_simple_enum_ = false;
   bool has_empty_prototype_ = false;
 
diff --git a/src/libsampler/DEPS b/src/libsampler/DEPS
new file mode 100644
index 0000000..bdf1a82
--- /dev/null
+++ b/src/libsampler/DEPS
@@ -0,0 +1,6 @@
+include_rules = [
+  "+include",
+  "-src",
+  "+src/base",
+  "+src/libsampler",
+]
\ No newline at end of file
diff --git a/src/libsampler/v8-sampler.cc b/src/libsampler/v8-sampler.cc
new file mode 100644
index 0000000..edf6df1
--- /dev/null
+++ b/src/libsampler/v8-sampler.cc
@@ -0,0 +1,673 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libsampler/v8-sampler.h"
+
+#if V8_OS_POSIX && !V8_OS_CYGWIN
+
+#define USE_SIGNALS
+
+#include <errno.h>
+#include <pthread.h>
+#include <signal.h>
+#include <sys/time.h>
+
+#if !V8_OS_QNX && !V8_OS_NACL && !V8_OS_AIX
+#include <sys/syscall.h>  // NOLINT
+#endif
+
+#if V8_OS_MACOSX
+#include <mach/mach.h>
+// OpenBSD doesn't have <ucontext.h>. ucontext_t lives in <signal.h>
+// and is a typedef for struct sigcontext. There is no uc_mcontext.
+#elif(!V8_OS_ANDROID || defined(__BIONIC_HAVE_UCONTEXT_T)) && \
+    !V8_OS_OPENBSD && !V8_OS_NACL
+#include <ucontext.h>
+#endif
+
+#include <unistd.h>
+
+// GLibc on ARM defines mcontext_t has a typedef for 'struct sigcontext'.
+// Old versions of the C library <signal.h> didn't define the type.
+#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
+    (defined(__arm__) || defined(__aarch64__)) && \
+    !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+#include <asm/sigcontext.h>  // NOLINT
+#endif
+
+#elif V8_OS_WIN || V8_OS_CYGWIN
+
+#include "src/base/win32-headers.h"
+
+#endif
+
+#include <algorithm>
+#include <vector>
+#include <map>
+
+#include "src/base/atomic-utils.h"
+#include "src/base/hashmap.h"
+#include "src/base/platform/platform.h"
+
+#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
+
+// Not all versions of Android's C library provide ucontext_t.
+// Detect this and provide custom but compatible definitions. Note that these
+// follow the GLibc naming convention to access register values from
+// mcontext_t.
+//
+// See http://code.google.com/p/android/issues/detail?id=34784
+
+#if defined(__arm__)
+
+typedef struct sigcontext mcontext_t;
+
+typedef struct ucontext {
+  uint32_t uc_flags;
+  struct ucontext* uc_link;
+  stack_t uc_stack;
+  mcontext_t uc_mcontext;
+  // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+
+#elif defined(__aarch64__)
+
+typedef struct sigcontext mcontext_t;
+
+typedef struct ucontext {
+  uint64_t uc_flags;
+  struct ucontext *uc_link;
+  stack_t uc_stack;
+  mcontext_t uc_mcontext;
+  // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+
+#elif defined(__mips__)
+// MIPS version of sigcontext, for Android bionic.
+typedef struct {
+  uint32_t regmask;
+  uint32_t status;
+  uint64_t pc;
+  uint64_t gregs[32];
+  uint64_t fpregs[32];
+  uint32_t acx;
+  uint32_t fpc_csr;
+  uint32_t fpc_eir;
+  uint32_t used_math;
+  uint32_t dsp;
+  uint64_t mdhi;
+  uint64_t mdlo;
+  uint32_t hi1;
+  uint32_t lo1;
+  uint32_t hi2;
+  uint32_t lo2;
+  uint32_t hi3;
+  uint32_t lo3;
+} mcontext_t;
+
+typedef struct ucontext {
+  uint32_t uc_flags;
+  struct ucontext* uc_link;
+  stack_t uc_stack;
+  mcontext_t uc_mcontext;
+  // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+
+#elif defined(__i386__)
+// x86 version for Android.
+typedef struct {
+  uint32_t gregs[19];
+  void* fpregs;
+  uint32_t oldmask;
+  uint32_t cr2;
+} mcontext_t;
+
+typedef uint32_t kernel_sigset_t[2];  // x86 kernel uses 64-bit signal masks
+typedef struct ucontext {
+  uint32_t uc_flags;
+  struct ucontext* uc_link;
+  stack_t uc_stack;
+  mcontext_t uc_mcontext;
+  // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
+
+#elif defined(__x86_64__)
+// x64 version for Android.
+typedef struct {
+  uint64_t gregs[23];
+  void* fpregs;
+  uint64_t __reserved1[8];
+} mcontext_t;
+
+typedef struct ucontext {
+  uint64_t uc_flags;
+  struct ucontext *uc_link;
+  stack_t uc_stack;
+  mcontext_t uc_mcontext;
+  // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 };
+#endif
+
+#endif  // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
+
+
+namespace v8 {
+namespace sampler {
+
+namespace {
+
+#if defined(USE_SIGNALS)
+typedef std::vector<Sampler*> SamplerList;
+typedef SamplerList::iterator SamplerListIterator;
+typedef base::AtomicValue<bool> AtomicMutex;
+
+class AtomicGuard {
+ public:
+  explicit AtomicGuard(AtomicMutex* atomic, bool is_blocking = true)
+      : atomic_(atomic), is_success_(false) {
+    do {
+      // Use Acquire_Load to gain mutual exclusion.
+      USE(atomic_->Value());
+      is_success_ = atomic_->TrySetValue(false, true);
+    } while (is_blocking && !is_success_);
+  }
+
+  bool is_success() const { return is_success_; }
+
+  ~AtomicGuard() {
+    if (!is_success_) return;
+    atomic_->SetValue(false);
+  }
+
+ private:
+  AtomicMutex* const atomic_;
+  bool is_success_;
+};
+
+// Returns key for hash map.
+void* ThreadKey(pthread_t thread_id) {
+  return reinterpret_cast<void*>(thread_id);
+}
+
+// Returns hash value for hash map.
+uint32_t ThreadHash(pthread_t thread_id) {
+#if V8_OS_MACOSX
+  return static_cast<uint32_t>(reinterpret_cast<intptr_t>(thread_id));
+#else
+  return static_cast<uint32_t>(thread_id);
+#endif
+}
+
+#endif  // USE_SIGNALS
+
+}  // namespace
+
+#if defined(USE_SIGNALS)
+
+class Sampler::PlatformData {
+ public:
+  PlatformData() : vm_tid_(pthread_self()) {}
+  pthread_t vm_tid() const { return vm_tid_; }
+
+ private:
+  pthread_t vm_tid_;
+};
+
+class SamplerManager {
+ public:
+  SamplerManager() : sampler_map_(base::HashMap::PointersMatch) {}
+
+  void AddSampler(Sampler* sampler) {
+    AtomicGuard atomic_guard(&samplers_access_counter_);
+    DCHECK(sampler->IsActive() || !sampler->IsRegistered());
+    // Add sampler into map if needed.
+    pthread_t thread_id = sampler->platform_data()->vm_tid();
+    base::HashMap::Entry* entry =
+            sampler_map_.LookupOrInsert(ThreadKey(thread_id),
+                                        ThreadHash(thread_id));
+    DCHECK(entry != nullptr);
+    if (entry->value == nullptr) {
+      SamplerList* samplers = new SamplerList();
+      samplers->push_back(sampler);
+      entry->value = samplers;
+    } else {
+      SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
+      bool exists = false;
+      for (SamplerListIterator iter = samplers->begin();
+           iter != samplers->end(); ++iter) {
+        if (*iter == sampler) {
+          exists = true;
+          break;
+        }
+      }
+      if (!exists) {
+        samplers->push_back(sampler);
+      }
+    }
+  }
+
+  void RemoveSampler(Sampler* sampler) {
+    AtomicGuard atomic_guard(&samplers_access_counter_);
+    DCHECK(sampler->IsActive() || sampler->IsRegistered());
+    // Remove sampler from map.
+    pthread_t thread_id = sampler->platform_data()->vm_tid();
+    void* thread_key = ThreadKey(thread_id);
+    uint32_t thread_hash = ThreadHash(thread_id);
+    base::HashMap::Entry* entry = sampler_map_.Lookup(thread_key, thread_hash);
+    DCHECK(entry != nullptr);
+    SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
+    for (SamplerListIterator iter = samplers->begin(); iter != samplers->end();
+         ++iter) {
+      if (*iter == sampler) {
+        samplers->erase(iter);
+        break;
+      }
+    }
+    if (samplers->empty()) {
+      sampler_map_.Remove(thread_key, thread_hash);
+      delete samplers;
+    }
+  }
+
+#if defined(USE_SIGNALS)
+  void DoSample(const v8::RegisterState& state) {
+    AtomicGuard atomic_guard(&SamplerManager::samplers_access_counter_, false);
+    if (!atomic_guard.is_success()) return;
+    pthread_t thread_id = pthread_self();
+    base::HashMap::Entry* entry =
+        sampler_map_.Lookup(ThreadKey(thread_id), ThreadHash(thread_id));
+    if (!entry) return;
+    SamplerList& samplers = *static_cast<SamplerList*>(entry->value);
+
+    for (int i = 0; i < samplers.size(); ++i) {
+      Sampler* sampler = samplers[i];
+      Isolate* isolate = sampler->isolate();
+      // We require a fully initialized and entered isolate.
+      if (isolate == nullptr || !isolate->IsInUse()) continue;
+      if (v8::Locker::IsActive() && !Locker::IsLocked(isolate)) continue;
+      sampler->SampleStack(state);
+    }
+  }
+#endif
+
+  static SamplerManager* instance() { return instance_.Pointer(); }
+
+ private:
+  base::HashMap sampler_map_;
+  static AtomicMutex samplers_access_counter_;
+  static base::LazyInstance<SamplerManager>::type instance_;
+};
+
+AtomicMutex SamplerManager::samplers_access_counter_;
+base::LazyInstance<SamplerManager>::type SamplerManager::instance_ =
+    LAZY_INSTANCE_INITIALIZER;
+
+#elif V8_OS_WIN || V8_OS_CYGWIN
+
+// ----------------------------------------------------------------------------
+// Win32 profiler support. On Cygwin we use the same sampler implementation as
+// on Win32.
+
+class Sampler::PlatformData {
+ public:
+  // Get a handle to the calling thread. This is the thread that we are
+  // going to profile. We need to make a copy of the handle because we are
+  // going to use it in the sampler thread. Using GetThreadHandle() will
+  // not work in this case. We're using OpenThread because DuplicateHandle
+  // for some reason doesn't work in Chrome's sandbox.
+  PlatformData()
+      : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
+                                    THREAD_SUSPEND_RESUME |
+                                    THREAD_QUERY_INFORMATION,
+                                    false,
+                                    GetCurrentThreadId())) {}
+
+  ~PlatformData() {
+    if (profiled_thread_ != nullptr) {
+      CloseHandle(profiled_thread_);
+      profiled_thread_ = nullptr;
+    }
+  }
+
+  HANDLE profiled_thread() { return profiled_thread_; }
+
+ private:
+  HANDLE profiled_thread_;
+};
+#endif  // USE_SIGNALS
+
+
+#if defined(USE_SIGNALS)
+class SignalHandler {
+ public:
+  static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
+  static void TearDown() {
+    delete mutex_;
+    mutex_ = nullptr;
+  }
+
+  static void IncreaseSamplerCount() {
+    base::LockGuard<base::Mutex> lock_guard(mutex_);
+    if (++client_count_ == 1) Install();
+  }
+
+  static void DecreaseSamplerCount() {
+    base::LockGuard<base::Mutex> lock_guard(mutex_);
+    if (--client_count_ == 0) Restore();
+  }
+
+  static bool Installed() {
+    base::LockGuard<base::Mutex> lock_guard(mutex_);
+    return signal_handler_installed_;
+  }
+
+ private:
+  static void Install() {
+#if !V8_OS_NACL
+    struct sigaction sa;
+    sa.sa_sigaction = &HandleProfilerSignal;
+    sigemptyset(&sa.sa_mask);
+#if V8_OS_QNX
+    sa.sa_flags = SA_SIGINFO;
+#else
+    sa.sa_flags = SA_RESTART | SA_SIGINFO;
+#endif
+    signal_handler_installed_ =
+        (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+#endif  // !V8_OS_NACL
+  }
+
+  static void Restore() {
+#if !V8_OS_NACL
+    if (signal_handler_installed_) {
+      sigaction(SIGPROF, &old_signal_handler_, 0);
+      signal_handler_installed_ = false;
+    }
+#endif
+  }
+
+#if !V8_OS_NACL
+  static void FillRegisterState(void* context, RegisterState* regs);
+  static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);
+#endif
+  // Protects the process wide state below.
+  static base::Mutex* mutex_;
+  static int client_count_;
+  static bool signal_handler_installed_;
+  static struct sigaction old_signal_handler_;
+};
+
+base::Mutex* SignalHandler::mutex_ = nullptr;
+int SignalHandler::client_count_ = 0;
+struct sigaction SignalHandler::old_signal_handler_;
+bool SignalHandler::signal_handler_installed_ = false;
+
+
+// As Native Client does not support signal handling, profiling is disabled.
+#if !V8_OS_NACL
+void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
+                                         void* context) {
+  USE(info);
+  if (signal != SIGPROF) return;
+  v8::RegisterState state;
+  FillRegisterState(context, &state);
+  SamplerManager::instance()->DoSample(state);
+}
+
+void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
+  // Extracting the sample from the context is extremely machine dependent.
+  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+#if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390)))
+  mcontext_t& mcontext = ucontext->uc_mcontext;
+#endif
+#if V8_OS_LINUX
+#if V8_HOST_ARCH_IA32
+  state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_EIP]);
+  state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_ESP]);
+  state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_EBP]);
+#elif V8_HOST_ARCH_X64
+  state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_RIP]);
+  state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_RSP]);
+  state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_RBP]);
+#elif V8_HOST_ARCH_ARM
+#if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
+  // Old GLibc ARM versions used a gregs[] array to access the register
+  // values from mcontext_t.
+  state->pc = reinterpret_cast<void*>(mcontext.gregs[R15]);
+  state->sp = reinterpret_cast<void*>(mcontext.gregs[R13]);
+  state->fp = reinterpret_cast<void*>(mcontext.gregs[R11]);
+#else
+  state->pc = reinterpret_cast<void*>(mcontext.arm_pc);
+  state->sp = reinterpret_cast<void*>(mcontext.arm_sp);
+  state->fp = reinterpret_cast<void*>(mcontext.arm_fp);
+#endif  // V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
+#elif V8_HOST_ARCH_ARM64
+  state->pc = reinterpret_cast<void*>(mcontext.pc);
+  state->sp = reinterpret_cast<void*>(mcontext.sp);
+  // FP is an alias for x29.
+  state->fp = reinterpret_cast<void*>(mcontext.regs[29]);
+#elif V8_HOST_ARCH_MIPS
+  state->pc = reinterpret_cast<void*>(mcontext.pc);
+  state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
+  state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
+#elif V8_HOST_ARCH_MIPS64
+  state->pc = reinterpret_cast<void*>(mcontext.pc);
+  state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
+  state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
+#elif V8_HOST_ARCH_PPC
+  state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->nip);
+  state->sp =
+      reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
+  state->fp =
+      reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
+#elif V8_HOST_ARCH_S390
+#if V8_TARGET_ARCH_32_BIT
+  // 31-bit target will have bit 0 (MSB) of the PSW set to denote addressing
+  // mode.  This bit needs to be masked out to resolve actual address.
+  state->pc =
+      reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr & 0x7FFFFFFF);
+#else
+  state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr);
+#endif  // V8_TARGET_ARCH_32_BIT
+  state->sp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[15]);
+  state->fp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[11]);
+#endif  // V8_HOST_ARCH_*
+#elif V8_OS_MACOSX
+#if V8_HOST_ARCH_X64
+#if __DARWIN_UNIX03
+  state->pc = reinterpret_cast<void*>(mcontext->__ss.__rip);
+  state->sp = reinterpret_cast<void*>(mcontext->__ss.__rsp);
+  state->fp = reinterpret_cast<void*>(mcontext->__ss.__rbp);
+#else  // !__DARWIN_UNIX03
+  state->pc = reinterpret_cast<void*>(mcontext->ss.rip);
+  state->sp = reinterpret_cast<void*>(mcontext->ss.rsp);
+  state->fp = reinterpret_cast<void*>(mcontext->ss.rbp);
+#endif  // __DARWIN_UNIX03
+#elif V8_HOST_ARCH_IA32
+#if __DARWIN_UNIX03
+  state->pc = reinterpret_cast<void*>(mcontext->__ss.__eip);
+  state->sp = reinterpret_cast<void*>(mcontext->__ss.__esp);
+  state->fp = reinterpret_cast<void*>(mcontext->__ss.__ebp);
+#else  // !__DARWIN_UNIX03
+  state->pc = reinterpret_cast<void*>(mcontext->ss.eip);
+  state->sp = reinterpret_cast<void*>(mcontext->ss.esp);
+  state->fp = reinterpret_cast<void*>(mcontext->ss.ebp);
+#endif  // __DARWIN_UNIX03
+#endif  // V8_HOST_ARCH_IA32
+#elif V8_OS_FREEBSD
+#if V8_HOST_ARCH_IA32
+  state->pc = reinterpret_cast<void*>(mcontext.mc_eip);
+  state->sp = reinterpret_cast<void*>(mcontext.mc_esp);
+  state->fp = reinterpret_cast<void*>(mcontext.mc_ebp);
+#elif V8_HOST_ARCH_X64
+  state->pc = reinterpret_cast<void*>(mcontext.mc_rip);
+  state->sp = reinterpret_cast<void*>(mcontext.mc_rsp);
+  state->fp = reinterpret_cast<void*>(mcontext.mc_rbp);
+#elif V8_HOST_ARCH_ARM
+  state->pc = reinterpret_cast<void*>(mcontext.mc_r15);
+  state->sp = reinterpret_cast<void*>(mcontext.mc_r13);
+  state->fp = reinterpret_cast<void*>(mcontext.mc_r11);
+#endif  // V8_HOST_ARCH_*
+#elif V8_OS_NETBSD
+#if V8_HOST_ARCH_IA32
+  state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_EIP]);
+  state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_ESP]);
+  state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_EBP]);
+#elif V8_HOST_ARCH_X64
+  state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_RIP]);
+  state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RSP]);
+  state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RBP]);
+#endif  // V8_HOST_ARCH_*
+#elif V8_OS_OPENBSD
+#if V8_HOST_ARCH_IA32
+  state->pc = reinterpret_cast<void*>(ucontext->sc_eip);
+  state->sp = reinterpret_cast<void*>(ucontext->sc_esp);
+  state->fp = reinterpret_cast<void*>(ucontext->sc_ebp);
+#elif V8_HOST_ARCH_X64
+  state->pc = reinterpret_cast<void*>(ucontext->sc_rip);
+  state->sp = reinterpret_cast<void*>(ucontext->sc_rsp);
+  state->fp = reinterpret_cast<void*>(ucontext->sc_rbp);
+#endif  // V8_HOST_ARCH_*
+#elif V8_OS_SOLARIS
+  state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_PC]);
+  state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_SP]);
+  state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_FP]);
+#elif V8_OS_QNX
+#if V8_HOST_ARCH_IA32
+  state->pc = reinterpret_cast<void*>(mcontext.cpu.eip);
+  state->sp = reinterpret_cast<void*>(mcontext.cpu.esp);
+  state->fp = reinterpret_cast<void*>(mcontext.cpu.ebp);
+#elif V8_HOST_ARCH_ARM
+  state->pc = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_PC]);
+  state->sp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_SP]);
+  state->fp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_FP]);
+#endif  // V8_HOST_ARCH_*
+#elif V8_OS_AIX
+  state->pc = reinterpret_cast<void*>(mcontext.jmp_context.iar);
+  state->sp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[1]);
+  state->fp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[31]);
+#endif  // V8_OS_AIX
+}
+
+#endif  // !V8_OS_NACL
+
+#endif  // USE_SIGNALS
+
+
+void Sampler::SetUp() {
+#if defined(USE_SIGNALS)
+  SignalHandler::SetUp();
+#endif
+}
+
+
+void Sampler::TearDown() {
+#if defined(USE_SIGNALS)
+  SignalHandler::TearDown();
+#endif
+}
+
+Sampler::Sampler(Isolate* isolate)
+    : is_counting_samples_(false),
+      js_sample_count_(0),
+      external_sample_count_(0),
+      isolate_(isolate),
+      profiling_(false),
+      has_processing_thread_(false),
+      active_(false),
+      registered_(false) {
+  data_ = new PlatformData;
+}
+
+Sampler::~Sampler() {
+  DCHECK(!IsActive());
+#if defined(USE_SIGNALS)
+  if (IsRegistered()) {
+    SamplerManager::instance()->RemoveSampler(this);
+  }
+#endif
+  delete data_;
+}
+
+void Sampler::Start() {
+  DCHECK(!IsActive());
+  SetActive(true);
+#if defined(USE_SIGNALS)
+  SamplerManager::instance()->AddSampler(this);
+#endif
+}
+
+
+void Sampler::Stop() {
+#if defined(USE_SIGNALS)
+  SamplerManager::instance()->RemoveSampler(this);
+#endif
+  DCHECK(IsActive());
+  SetActive(false);
+  SetRegistered(false);
+}
+
+
+void Sampler::IncreaseProfilingDepth() {
+  base::NoBarrier_AtomicIncrement(&profiling_, 1);
+#if defined(USE_SIGNALS)
+  SignalHandler::IncreaseSamplerCount();
+#endif
+}
+
+
+void Sampler::DecreaseProfilingDepth() {
+#if defined(USE_SIGNALS)
+  SignalHandler::DecreaseSamplerCount();
+#endif
+  base::NoBarrier_AtomicIncrement(&profiling_, -1);
+}
+
+
+#if defined(USE_SIGNALS)
+
+void Sampler::DoSample() {
+  if (!SignalHandler::Installed()) return;
+  if (!IsActive() && !IsRegistered()) {
+    SamplerManager::instance()->AddSampler(this);
+    SetRegistered(true);
+  }
+  pthread_kill(platform_data()->vm_tid(), SIGPROF);
+}
+
+#elif V8_OS_WIN || V8_OS_CYGWIN
+
+void Sampler::DoSample() {
+  HANDLE profiled_thread = platform_data()->profiled_thread();
+  if (profiled_thread == nullptr) return;
+
+  const DWORD kSuspendFailed = static_cast<DWORD>(-1);
+  if (SuspendThread(profiled_thread) == kSuspendFailed) return;
+
+  // Context used for sampling the register state of the profiled thread.
+  CONTEXT context;
+  memset(&context, 0, sizeof(context));
+  context.ContextFlags = CONTEXT_FULL;
+  if (GetThreadContext(profiled_thread, &context) != 0) {
+    v8::RegisterState state;
+#if V8_HOST_ARCH_X64
+    state.pc = reinterpret_cast<void*>(context.Rip);
+    state.sp = reinterpret_cast<void*>(context.Rsp);
+    state.fp = reinterpret_cast<void*>(context.Rbp);
+#else
+    state.pc = reinterpret_cast<void*>(context.Eip);
+    state.sp = reinterpret_cast<void*>(context.Esp);
+    state.fp = reinterpret_cast<void*>(context.Ebp);
+#endif
+    SampleStack(state);
+  }
+  ResumeThread(profiled_thread);
+}
+
+#endif  // USE_SIGNALS
+
+}  // namespace sampler
+}  // namespace v8
diff --git a/src/profiler/sampler.h b/src/libsampler/v8-sampler.h
similarity index 79%
rename from src/profiler/sampler.h
rename to src/libsampler/v8-sampler.h
index 3d3a6e9..7ae3c8c 100644
--- a/src/profiler/sampler.h
+++ b/src/libsampler/v8-sampler.h
@@ -1,9 +1,9 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
+// Copyright 2016 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_PROFILER_SAMPLER_H_
-#define V8_PROFILER_SAMPLER_H_
+#ifndef V8_LIBSAMPLER_SAMPLER_H_
+#define V8_LIBSAMPLER_SAMPLER_H_
 
 #include "include/v8.h"
 
@@ -11,10 +11,7 @@
 #include "src/base/macros.h"
 
 namespace v8 {
-namespace internal {
-
-class Isolate;
-struct TickSample;
+namespace sampler {
 
 // ----------------------------------------------------------------------------
 // Sampler
@@ -25,19 +22,23 @@
 
 class Sampler {
  public:
+  static const int kMaxFramesCountLog2 = 8;
+  static const unsigned kMaxFramesCount = (1u << kMaxFramesCountLog2) - 1;
+
   // Initializes the Sampler support. Called once at VM startup.
   static void SetUp();
   static void TearDown();
 
   // Initialize sampler.
-  Sampler(Isolate* isolate, int interval);
+  explicit Sampler(Isolate* isolate);
   virtual ~Sampler();
 
   Isolate* isolate() const { return isolate_; }
-  int interval() const { return interval_; }
 
   // Performs stack sampling.
-  void SampleStack(const v8::RegisterState& regs);
+  // Clients should override this method in order to do something on samples,
+  // for example buffer samples in a queue.
+  virtual void SampleStack(const v8::RegisterState& regs) = 0;
 
   // Start and stop sampler.
   void Start();
@@ -60,8 +61,7 @@
   bool IsRegistered() const { return base::NoBarrier_Load(&registered_); }
 
   void DoSample();
-  // If true next sample must be initiated on the profiler event processor
-  // thread right after latest sample is processed.
+
   void SetHasProcessingThread(bool value) {
     base::NoBarrier_Store(&has_processing_thread_, value);
   }
@@ -79,30 +79,25 @@
   PlatformData* platform_data() const { return data_; }
 
  protected:
-  // This method is called for each sampling period with the current
-  // program counter.
-  virtual void Tick(TickSample* sample) = 0;
+  // Counts stack samples taken in various VM states.
+  bool is_counting_samples_;
+  unsigned js_sample_count_;
+  unsigned external_sample_count_;
 
  private:
   void SetActive(bool value) { base::NoBarrier_Store(&active_, value); }
-
   void SetRegistered(bool value) { base::NoBarrier_Store(&registered_, value); }
 
   Isolate* isolate_;
-  const int interval_;
   base::Atomic32 profiling_;
   base::Atomic32 has_processing_thread_;
   base::Atomic32 active_;
   base::Atomic32 registered_;
   PlatformData* data_;  // Platform specific data.
-  // Counts stack samples taken in various VM states.
-  bool is_counting_samples_;
-  unsigned js_sample_count_;
-  unsigned external_sample_count_;
   DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
 };
 
-}  // namespace internal
+}  // namespace sampler
 }  // namespace v8
 
-#endif  // V8_PROFILER_SAMPLER_H_
+#endif  // V8_LIBSAMPLER_SAMPLER_H_
diff --git a/src/log-inl.h b/src/log-inl.h
index 765398f..b986597 100644
--- a/src/log-inl.h
+++ b/src/log-inl.h
@@ -13,25 +13,24 @@
 namespace v8 {
 namespace internal {
 
-Logger::LogEventsAndTags Logger::ToNativeByScript(Logger::LogEventsAndTags tag,
-                                                  Script* script) {
-  if ((tag == FUNCTION_TAG || tag == LAZY_COMPILE_TAG || tag == SCRIPT_TAG) &&
-      script->type() == Script::TYPE_NATIVE) {
-    switch (tag) {
-      case FUNCTION_TAG: return NATIVE_FUNCTION_TAG;
-      case LAZY_COMPILE_TAG: return NATIVE_LAZY_COMPILE_TAG;
-      case SCRIPT_TAG: return NATIVE_SCRIPT_TAG;
-      default: return tag;
-    }
-  } else {
-    return tag;
+CodeEventListener::LogEventsAndTags Logger::ToNativeByScript(
+    CodeEventListener::LogEventsAndTags tag, Script* script) {
+  if (script->type() != Script::TYPE_NATIVE) return tag;
+  switch (tag) {
+    case CodeEventListener::FUNCTION_TAG:
+      return CodeEventListener::NATIVE_FUNCTION_TAG;
+    case CodeEventListener::LAZY_COMPILE_TAG:
+      return CodeEventListener::NATIVE_LAZY_COMPILE_TAG;
+    case CodeEventListener::SCRIPT_TAG:
+      return CodeEventListener::NATIVE_SCRIPT_TAG;
+    default:
+      return tag;
   }
 }
 
-
 void Logger::CallEventLogger(Isolate* isolate, const char* name, StartEnd se,
                              bool expose_to_api) {
-  if (isolate->event_logger() != NULL) {
+  if (isolate->event_logger()) {
     if (isolate->event_logger() == DefaultEventLoggerSentinel) {
       LOG(isolate, TimerEvent(se, name));
     } else if (expose_to_api) {
@@ -39,6 +38,7 @@
     }
   }
 }
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/log-utils.cc b/src/log-utils.cc
index a83a0ae..22972ec 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -164,12 +164,14 @@
   }
 }
 
-void Log::MessageBuilder::AppendAddress(Address addr) { Append("%p", addr); }
+void Log::MessageBuilder::AppendAddress(Address addr) {
+  Append("%p", static_cast<void*>(addr));
+}
 
 void Log::MessageBuilder::AppendSymbolName(Symbol* symbol) {
   DCHECK(symbol);
   Append("symbol(");
-  if (!symbol->name()->IsUndefined()) {
+  if (!symbol->name()->IsUndefined(symbol->GetIsolate())) {
     Append("\"");
     AppendDetailed(String::cast(symbol->name()), false);
     Append("\" ");
diff --git a/src/log.cc b/src/log.cc
index 97acea9..43c3981 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -11,15 +11,18 @@
 #include "src/base/platform/platform.h"
 #include "src/bootstrapper.h"
 #include "src/code-stubs.h"
+#include "src/counters.h"
 #include "src/deoptimizer.h"
 #include "src/global-handles.h"
 #include "src/interpreter/bytecodes.h"
 #include "src/interpreter/interpreter.h"
+#include "src/libsampler/v8-sampler.h"
 #include "src/log-inl.h"
 #include "src/log-utils.h"
 #include "src/macro-assembler.h"
 #include "src/perf-jit.h"
-#include "src/profiler/cpu-profiler.h"
+#include "src/profiler/cpu-profiler-inl.h"
+#include "src/profiler/profiler-listener.h"
 #include "src/runtime-profiler.h"
 #include "src/string-stream.h"
 #include "src/vm-state-inl.h"
@@ -29,25 +32,10 @@
 
 
 #define DECLARE_EVENT(ignore1, name) name,
-static const char* const kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
-  LOG_EVENTS_AND_TAGS_LIST(DECLARE_EVENT)
-};
+static const char* kLogEventsNames[CodeEventListener::NUMBER_OF_LOG_EVENTS] = {
+    LOG_EVENTS_AND_TAGS_LIST(DECLARE_EVENT)};
 #undef DECLARE_EVENT
 
-
-#define CALL_LISTENERS(Call)                    \
-for (int i = 0; i < listeners_.length(); ++i) { \
-  listeners_[i]->Call;                          \
-}
-
-#define PROFILER_LOG(Call)                                \
-  do {                                                    \
-    CpuProfiler* cpu_profiler = isolate_->cpu_profiler(); \
-    if (cpu_profiler->is_profiling()) {                   \
-      cpu_profiler->Call;                                 \
-    }                                                     \
-  } while (false);
-
 static const char* ComputeMarker(SharedFunctionInfo* shared,
                                  AbstractCode* code) {
   switch (code->kind()) {
@@ -70,7 +58,7 @@
     utf8_pos_ = 0;
   }
 
-  void Init(Logger::LogEventsAndTags tag) {
+  void Init(CodeEventListener::LogEventsAndTags tag) {
     Reset();
     AppendBytes(kLogEventsNames[tag]);
     AppendByte(':');
@@ -82,7 +70,7 @@
     } else {
       Symbol* symbol = Symbol::cast(name);
       AppendBytes("symbol(");
-      if (!symbol->name()->IsUndefined()) {
+      if (!symbol->name()->IsUndefined(symbol->GetIsolate())) {
         AppendBytes("\"");
         AppendString(String::cast(symbol->name()));
         AppendBytes("\" ");
@@ -164,21 +152,21 @@
 
 CodeEventLogger::~CodeEventLogger() { delete name_buffer_; }
 
-void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
+void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
                                       AbstractCode* code, const char* comment) {
   name_buffer_->Init(tag);
   name_buffer_->AppendBytes(comment);
   LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
 }
 
-void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
+void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
                                       AbstractCode* code, Name* name) {
   name_buffer_->Init(tag);
   name_buffer_->AppendName(name);
   LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
 }
 
-void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
+void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
                                       AbstractCode* code,
                                       SharedFunctionInfo* shared, Name* name) {
   name_buffer_->Init(tag);
@@ -187,7 +175,7 @@
   LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size());
 }
 
-void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
+void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
                                       AbstractCode* code,
                                       SharedFunctionInfo* shared, Name* source,
                                       int line, int column) {
@@ -207,7 +195,7 @@
   LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size());
 }
 
-void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
+void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
                                       AbstractCode* code, int args_count) {
   name_buffer_->Init(tag);
   name_buffer_->AppendInt(args_count);
@@ -216,7 +204,7 @@
 
 void CodeEventLogger::RegExpCodeCreateEvent(AbstractCode* code,
                                             String* source) {
-  name_buffer_->Init(Logger::REG_EXP_TAG);
+  name_buffer_->Init(CodeEventListener::REG_EXP_TAG);
   name_buffer_->AppendString(source);
   LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
 }
@@ -240,10 +228,6 @@
   static const char kFilenameFormatString[];
   static const int kFilenameBufferPadding;
 
-  // File buffer size of the low-level log. We don't use the default to
-  // minimize the associated overhead.
-  static const int kLogBufferSize = 2 * MB;
-
   FILE* perf_output_handle_;
 };
 
@@ -264,7 +248,7 @@
   perf_output_handle_ =
       base::OS::FOpen(perf_dump_name.start(), base::OS::LogFileOpenMode);
   CHECK_NOT_NULL(perf_output_handle_);
-  setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize);
+  setvbuf(perf_output_handle_, NULL, _IOLBF, 0);
 }
 
 
@@ -335,10 +319,6 @@
   // Extension added to V8 log file name to get the low-level log name.
   static const char kLogExt[];
 
-  // File buffer size of the low-level log. We don't use the default to
-  // minimize the associated overhead.
-  static const int kLogBufferSize = 2 * MB;
-
   void LogCodeInfo();
   void LogWriteBytes(const char* bytes, int size);
 
@@ -363,7 +343,7 @@
   MemCopy(ll_name.start() + len, kLogExt, sizeof(kLogExt));
   ll_output_handle_ =
       base::OS::FOpen(ll_name.start(), base::OS::LogFileOpenMode);
-  setvbuf(ll_output_handle_, NULL, _IOFBF, kLogBufferSize);
+  setvbuf(ll_output_handle_, NULL, _IOLBF, 0);
 
   LogCodeInfo();
 }
@@ -539,6 +519,31 @@
 }
 
 
+// TODO(lpy): Keeping sampling thread inside V8 is a workaround currently,
+// the reason is to reduce code duplication during migration to sampler library,
+// sampling thread, as well as the sampler, will be moved to D8 eventually.
+class SamplingThread : public base::Thread {
+ public:
+  static const int kSamplingThreadStackSize = 64 * KB;
+
+  SamplingThread(sampler::Sampler* sampler, int interval)
+      : base::Thread(base::Thread::Options("SamplingThread",
+                                           kSamplingThreadStackSize)),
+        sampler_(sampler),
+        interval_(interval) {}
+  void Run() override {
+    while (sampler_->IsProfiling()) {
+      sampler_->DoSample();
+      base::OS::Sleep(base::TimeDelta::FromMilliseconds(interval_));
+    }
+  }
+
+ private:
+  sampler::Sampler* sampler_;
+  const int interval_;
+};
+
+
 // The Profiler samples pc and sp values for the main thread.
 // Each sample is appended to a circular buffer.
 // An independent thread removes data and writes it to the log.
@@ -611,16 +616,16 @@
 // Ticker used to provide ticks to the profiler and the sliding state
 // window.
 //
-class Ticker: public Sampler {
+class Ticker: public sampler::Sampler {
  public:
   Ticker(Isolate* isolate, int interval):
-      Sampler(isolate, interval),
-      profiler_(NULL) {}
+      sampler::Sampler(reinterpret_cast<v8::Isolate*>(isolate)),
+      profiler_(NULL),
+      sampling_thread_(new SamplingThread(this, interval)) {}
 
-  ~Ticker() { if (IsActive()) Stop(); }
-
-  virtual void Tick(TickSample* sample) {
-    if (profiler_) profiler_->Insert(sample);
+  ~Ticker() {
+    if (IsActive()) Stop();
+    delete sampling_thread_;
   }
 
   void SetProfiler(Profiler* profiler) {
@@ -628,16 +633,40 @@
     profiler_ = profiler;
     IncreaseProfilingDepth();
     if (!IsActive()) Start();
+    sampling_thread_->StartSynchronously();
   }
 
   void ClearProfiler() {
     profiler_ = NULL;
     if (IsActive()) Stop();
     DecreaseProfilingDepth();
+    sampling_thread_->Join();
+  }
+
+  void SampleStack(const v8::RegisterState& state) override {
+    v8::Isolate* v8_isolate = isolate();
+    Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+#if defined(USE_SIMULATOR)
+    SimulatorHelper::FillRegisters(isolate,
+                                   const_cast<v8::RegisterState*>(&state));
+#endif
+    TickSample* sample = isolate->cpu_profiler()->StartTickSample();
+    TickSample sample_obj;
+    if (sample == NULL) sample = &sample_obj;
+    sample->Init(isolate, state, TickSample::kIncludeCEntryFrame, true);
+    if (is_counting_samples_ && !sample->timestamp.IsNull()) {
+      if (sample->state == JS) ++js_sample_count_;
+      if (sample->state == EXTERNAL) ++external_sample_count_;
+    }
+    if (profiler_) profiler_->Insert(sample);
+    if (sample != &sample_obj) {
+      isolate->cpu_profiler()->FinishTickSample();
+    }
   }
 
  private:
   Profiler* profiler_;
+  SamplingThread* sampling_thread_;
 };
 
 
@@ -733,24 +762,16 @@
   delete log_;
 }
 
-
 void Logger::addCodeEventListener(CodeEventListener* listener) {
-  DCHECK(!hasCodeEventListener(listener));
-  listeners_.Add(listener);
+  bool result = isolate_->code_event_dispatcher()->AddListener(listener);
+  USE(result);
+  DCHECK(result);
 }
 
-
 void Logger::removeCodeEventListener(CodeEventListener* listener) {
-  DCHECK(hasCodeEventListener(listener));
-  listeners_.RemoveElement(listener);
+  isolate_->code_event_dispatcher()->RemoveListener(listener);
 }
 
-
-bool Logger::hasCodeEventListener(CodeEventListener* listener) {
-  return listeners_.Contains(listener);
-}
-
-
 void Logger::ProfilerBeginEvent() {
   if (!log_->IsEnabled()) return;
   Log::MessageBuilder msg(log_);
@@ -801,7 +822,7 @@
 void Logger::HandleEvent(const char* name, Object** location) {
   if (!log_->IsEnabled() || !FLAG_log_handles) return;
   Log::MessageBuilder msg(log_);
-  msg.Append("%s,%p", name, location);
+  msg.Append("%s,%p", name, static_cast<void*>(location));
   msg.WriteToLogFile();
 }
 
@@ -838,7 +859,6 @@
 
 
 void Logger::CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta) {
-  PROFILER_LOG(CodeDeoptEvent(code, pc, fp_to_sp_delta));
   if (!log_->IsEnabled() || !FLAG_log_internal_timer_events) return;
   Log::MessageBuilder msg(log_);
   int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
@@ -926,19 +946,19 @@
   // global flag
   Handle<Object> global =
       JSReceiver::GetProperty(isolate, regexp, "global").ToHandleChecked();
-  if (global->IsTrue()) {
+  if (global->IsTrue(isolate)) {
     msg->Append('g');
   }
   // ignorecase flag
   Handle<Object> ignorecase =
       JSReceiver::GetProperty(isolate, regexp, "ignoreCase").ToHandleChecked();
-  if (ignorecase->IsTrue()) {
+  if (ignorecase->IsTrue(isolate)) {
     msg->Append('i');
   }
   // multiline flag
   Handle<Object> multiline =
       JSReceiver::GetProperty(isolate, regexp, "multiline").ToHandleChecked();
-  if (multiline->IsTrue()) {
+  if (multiline->IsTrue(isolate)) {
     msg->Append('m');
   }
 }
@@ -971,7 +991,7 @@
   } else {
     Symbol* symbol = Symbol::cast(name);
     uint32_t hash = symbol->Hash();
-    if (symbol->name()->IsUndefined()) {
+    if (symbol->name()->IsUndefined(symbol->GetIsolate())) {
       ApiEvent("api,%s,\"%s\",symbol(hash %x)", tag, class_name.get(), hash);
     } else {
       base::SmartArrayPointer<char> str =
@@ -1030,8 +1050,8 @@
   if (!FLAG_log_code || !log_->IsEnabled()) return;
   Log::MessageBuilder msg(log_);
   msg.Append("%s,%s,-2,",
-             kLogEventsNames[CODE_CREATION_EVENT],
-             kLogEventsNames[CALLBACK_TAG]);
+             kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT],
+             kLogEventsNames[CodeEventListener::CALLBACK_TAG]);
   msg.AppendAddress(entry_point);
   if (name->IsString()) {
     base::SmartArrayPointer<char> str =
@@ -1039,7 +1059,7 @@
     msg.Append(",1,\"%s%s\"", prefix, str.get());
   } else {
     Symbol* symbol = Symbol::cast(name);
-    if (symbol->name()->IsUndefined()) {
+    if (symbol->name()->IsUndefined(symbol->GetIsolate())) {
       msg.Append(",1,symbol(hash %x)", symbol->Hash());
     } else {
       base::SmartArrayPointer<char> str =
@@ -1054,41 +1074,33 @@
 
 
 void Logger::CallbackEvent(Name* name, Address entry_point) {
-  PROFILER_LOG(CallbackEvent(name, entry_point));
   CallbackEventInternal("", name, entry_point);
 }
 
 
 void Logger::GetterCallbackEvent(Name* name, Address entry_point) {
-  PROFILER_LOG(GetterCallbackEvent(name, entry_point));
   CallbackEventInternal("get ", name, entry_point);
 }
 
 
 void Logger::SetterCallbackEvent(Name* name, Address entry_point) {
-  PROFILER_LOG(SetterCallbackEvent(name, entry_point));
   CallbackEventInternal("set ", name, entry_point);
 }
 
 static void AppendCodeCreateHeader(Log::MessageBuilder* msg,
-                                   Logger::LogEventsAndTags tag,
+                                   CodeEventListener::LogEventsAndTags tag,
                                    AbstractCode* code) {
   DCHECK(msg);
   msg->Append("%s,%s,%d,",
-              kLogEventsNames[Logger::CODE_CREATION_EVENT],
-              kLogEventsNames[tag],
-              code->kind());
+              kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT],
+              kLogEventsNames[tag], code->kind());
   msg->AppendAddress(code->address());
   msg->Append(",%d,", code->ExecutableSize());
 }
 
-void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
-                             const char* comment) {
-  PROFILER_LOG(CodeCreateEvent(tag, code, comment));
-
+void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+                             AbstractCode* code, const char* comment) {
   if (!is_logging_code_events()) return;
-  CALL_LISTENERS(CodeCreateEvent(tag, code, comment));
-
   if (!FLAG_log_code || !log_->IsEnabled()) return;
   Log::MessageBuilder msg(log_);
   AppendCodeCreateHeader(&msg, tag, code);
@@ -1096,13 +1108,9 @@
   msg.WriteToLogFile();
 }
 
-void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
-                             Name* name) {
-  PROFILER_LOG(CodeCreateEvent(tag, code, name));
-
+void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+                             AbstractCode* code, Name* name) {
   if (!is_logging_code_events()) return;
-  CALL_LISTENERS(CodeCreateEvent(tag, code, name));
-
   if (!FLAG_log_code || !log_->IsEnabled()) return;
   Log::MessageBuilder msg(log_);
   AppendCodeCreateHeader(&msg, tag, code);
@@ -1116,13 +1124,10 @@
   msg.WriteToLogFile();
 }
 
-void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
-                             SharedFunctionInfo* shared, Name* name) {
-  PROFILER_LOG(CodeCreateEvent(tag, code, shared, name));
-
+void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+                             AbstractCode* code, SharedFunctionInfo* shared,
+                             Name* name) {
   if (!is_logging_code_events()) return;
-  CALL_LISTENERS(CodeCreateEvent(tag, code, shared, name));
-
   if (!FLAG_log_code || !log_->IsEnabled()) return;
   if (code == AbstractCode::cast(
                   isolate_->builtins()->builtin(Builtins::kCompileLazy))) {
@@ -1148,14 +1153,10 @@
 // Although, it is possible to extract source and line from
 // the SharedFunctionInfo object, we left it to caller
 // to leave logging functions free from heap allocations.
-void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
-                             SharedFunctionInfo* shared, Name* source, int line,
-                             int column) {
-  PROFILER_LOG(CodeCreateEvent(tag, code, shared, source, line, column));
-
+void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+                             AbstractCode* code, SharedFunctionInfo* shared,
+                             Name* source, int line, int column) {
   if (!is_logging_code_events()) return;
-  CALL_LISTENERS(CodeCreateEvent(tag, code, shared, source, line, column));
-
   if (!FLAG_log_code || !log_->IsEnabled()) return;
   Log::MessageBuilder msg(log_);
   AppendCodeCreateHeader(&msg, tag, code);
@@ -1175,13 +1176,9 @@
   msg.WriteToLogFile();
 }
 
-void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
-                             int args_count) {
-  PROFILER_LOG(CodeCreateEvent(tag, code, args_count));
-
+void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+                             AbstractCode* code, int args_count) {
   if (!is_logging_code_events()) return;
-  CALL_LISTENERS(CodeCreateEvent(tag, code, args_count));
-
   if (!FLAG_log_code || !log_->IsEnabled()) return;
   Log::MessageBuilder msg(log_);
   AppendCodeCreateHeader(&msg, tag, code);
@@ -1191,14 +1188,10 @@
 
 void Logger::CodeDisableOptEvent(AbstractCode* code,
                                  SharedFunctionInfo* shared) {
-  PROFILER_LOG(CodeDisableOptEvent(code, shared));
-
   if (!is_logging_code_events()) return;
-  CALL_LISTENERS(CodeDisableOptEvent(code, shared));
-
   if (!FLAG_log_code || !log_->IsEnabled()) return;
   Log::MessageBuilder msg(log_);
-  msg.Append("%s,", kLogEventsNames[CODE_DISABLE_OPT_EVENT]);
+  msg.Append("%s,", kLogEventsNames[CodeEventListener::CODE_DISABLE_OPT_EVENT]);
   base::SmartArrayPointer<char> name =
       shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   msg.Append("\"%s\",", name.get());
@@ -1208,23 +1201,16 @@
 
 
 void Logger::CodeMovingGCEvent() {
-  PROFILER_LOG(CodeMovingGCEvent());
-
   if (!is_logging_code_events()) return;
   if (!log_->IsEnabled() || !FLAG_ll_prof) return;
-  CALL_LISTENERS(CodeMovingGCEvent());
   base::OS::SignalCodeMovingGC();
 }
 
 void Logger::RegExpCodeCreateEvent(AbstractCode* code, String* source) {
-  PROFILER_LOG(RegExpCodeCreateEvent(code, source));
-
   if (!is_logging_code_events()) return;
-  CALL_LISTENERS(RegExpCodeCreateEvent(code, source));
-
   if (!FLAG_log_code || !log_->IsEnabled()) return;
   Log::MessageBuilder msg(log_);
-  AppendCodeCreateHeader(&msg, REG_EXP_TAG, code);
+  AppendCodeCreateHeader(&msg, CodeEventListener::REG_EXP_TAG, code);
   msg.Append('"');
   msg.AppendDetailed(source, false);
   msg.Append('"');
@@ -1232,11 +1218,8 @@
 }
 
 void Logger::CodeMoveEvent(AbstractCode* from, Address to) {
-  PROFILER_LOG(CodeMoveEvent(from, to));
-
   if (!is_logging_code_events()) return;
-  CALL_LISTENERS(CodeMoveEvent(from, to));
-  MoveEventInternal(CODE_MOVE_EVENT, from->address(), to);
+  MoveEventInternal(CodeEventListener::CODE_MOVE_EVENT, from->address(), to);
 }
 
 void Logger::CodeLinePosInfoAddPositionEvent(void* jit_handler_data,
@@ -1273,7 +1256,8 @@
 void Logger::CodeNameEvent(Address addr, int pos, const char* code_name) {
   if (code_name == NULL) return;  // Not a code object.
   Log::MessageBuilder msg(log_);
-  msg.Append("%s,%d,", kLogEventsNames[SNAPSHOT_CODE_NAME_EVENT], pos);
+  msg.Append("%s,%d,",
+             kLogEventsNames[CodeEventListener::SNAPSHOT_CODE_NAME_EVENT], pos);
   msg.AppendDoubleQuotedString(code_name);
   msg.WriteToLogFile();
 }
@@ -1281,13 +1265,11 @@
 
 void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) {
   if (!is_logging_code_events()) return;
-  MoveEventInternal(SHARED_FUNC_MOVE_EVENT, from, to);
+  MoveEventInternal(CodeEventListener::SHARED_FUNC_MOVE_EVENT, from, to);
 }
 
-
-void Logger::MoveEventInternal(LogEventsAndTags event,
-                               Address from,
-                               Address to) {
+void Logger::MoveEventInternal(CodeEventListener::LogEventsAndTags event,
+                               Address from, Address to) {
   if (!FLAG_log_code || !log_->IsEnabled()) return;
   Log::MessageBuilder msg(log_);
   msg.Append("%s,", kLogEventsNames[event]);
@@ -1381,11 +1363,25 @@
   msg.WriteToLogFile();
 }
 
+void Logger::RuntimeCallTimerEvent() {
+  RuntimeCallStats* stats = isolate_->counters()->runtime_call_stats();
+  RuntimeCallTimer* timer = stats->current_timer();
+  if (timer == nullptr) return;
+  RuntimeCallCounter* counter = timer->counter();
+  if (counter == nullptr) return;
+  Log::MessageBuilder msg(log_);
+  msg.Append("active-runtime-timer,");
+  msg.AppendDoubleQuotedString(counter->name);
+  msg.WriteToLogFile();
+}
 
 void Logger::TickEvent(TickSample* sample, bool overflow) {
   if (!log_->IsEnabled() || !FLAG_prof_cpp) return;
+  if (FLAG_runtime_call_stats) {
+    RuntimeCallTimerEvent();
+  }
   Log::MessageBuilder msg(log_);
-  msg.Append("%s,", kLogEventsNames[TICK_EVENT]);
+  msg.Append("%s,", kLogEventsNames[CodeEventListener::TICK_EVENT]);
   msg.AppendAddress(sample->pc);
   msg.Append(",%d", static_cast<int>(timer_.Elapsed().InMicroseconds()));
   if (sample->has_external_callback) {
@@ -1412,6 +1408,7 @@
   if (profiler_ != NULL) {
     profiler_->pause();
     is_logging_ = false;
+    removeCodeEventListener(this);
   }
 }
 
@@ -1493,7 +1490,7 @@
 
 void Logger::LogCodeObject(Object* object) {
   AbstractCode* code_object = AbstractCode::cast(object);
-  LogEventsAndTags tag = Logger::STUB_TAG;
+  CodeEventListener::LogEventsAndTags tag = CodeEventListener::STUB_TAG;
   const char* description = "Unknown code from the snapshot";
   switch (code_object->kind()) {
     case AbstractCode::FUNCTION:
@@ -1511,53 +1508,59 @@
           CodeStub::MajorName(CodeStub::GetMajorKey(code_object->GetCode()));
       if (description == NULL)
         description = "A stub from the snapshot";
-      tag = Logger::STUB_TAG;
+      tag = CodeEventListener::STUB_TAG;
       break;
     case AbstractCode::REGEXP:
       description = "Regular expression code";
-      tag = Logger::REG_EXP_TAG;
+      tag = CodeEventListener::REG_EXP_TAG;
       break;
     case AbstractCode::BUILTIN:
       description =
           isolate_->builtins()->name(code_object->GetCode()->builtin_index());
-      tag = Logger::BUILTIN_TAG;
+      tag = CodeEventListener::BUILTIN_TAG;
       break;
     case AbstractCode::HANDLER:
       description = "An IC handler from the snapshot";
-      tag = Logger::HANDLER_TAG;
+      tag = CodeEventListener::HANDLER_TAG;
       break;
     case AbstractCode::KEYED_LOAD_IC:
       description = "A keyed load IC from the snapshot";
-      tag = Logger::KEYED_LOAD_IC_TAG;
+      tag = CodeEventListener::KEYED_LOAD_IC_TAG;
       break;
     case AbstractCode::LOAD_IC:
       description = "A load IC from the snapshot";
-      tag = Logger::LOAD_IC_TAG;
+      tag = CodeEventListener::LOAD_IC_TAG;
+      break;
+    case AbstractCode::LOAD_GLOBAL_IC:
+      description = "A load global IC from the snapshot";
+      tag = Logger::LOAD_GLOBAL_IC_TAG;
       break;
     case AbstractCode::CALL_IC:
       description = "A call IC from the snapshot";
-      tag = Logger::CALL_IC_TAG;
+      tag = CodeEventListener::CALL_IC_TAG;
       break;
     case AbstractCode::STORE_IC:
       description = "A store IC from the snapshot";
-      tag = Logger::STORE_IC_TAG;
+      tag = CodeEventListener::STORE_IC_TAG;
       break;
     case AbstractCode::KEYED_STORE_IC:
       description = "A keyed store IC from the snapshot";
-      tag = Logger::KEYED_STORE_IC_TAG;
+      tag = CodeEventListener::KEYED_STORE_IC_TAG;
       break;
     case AbstractCode::WASM_FUNCTION:
       description = "A Wasm function";
-      tag = Logger::STUB_TAG;
+      tag = CodeEventListener::STUB_TAG;
       break;
     case AbstractCode::JS_TO_WASM_FUNCTION:
       description = "A JavaScript to Wasm adapter";
-      tag = Logger::STUB_TAG;
+      tag = CodeEventListener::STUB_TAG;
       break;
     case AbstractCode::WASM_TO_JS_FUNCTION:
       description = "A Wasm to JavaScript adapter";
-      tag = Logger::STUB_TAG;
+      tag = CodeEventListener::STUB_TAG;
       break;
+    case AbstractCode::NUMBER_OF_KINDS:
+      UNIMPLEMENTED();
   }
   PROFILE(isolate_, CodeCreateEvent(tag, code_object, description));
 }
@@ -1593,8 +1596,9 @@
         Code* code = interpreter->GetBytecodeHandler(bytecode, operand_scale);
         std::string bytecode_name =
             interpreter::Bytecodes::ToString(bytecode, operand_scale);
-        CodeCreateEvent(Logger::BYTECODE_HANDLER_TAG, AbstractCode::cast(code),
-                        bytecode_name.c_str());
+        PROFILE(isolate_, CodeCreateEvent(
+                              CodeEventListener::BYTECODE_HANDLER_TAG,
+                              AbstractCode::cast(code), bytecode_name.c_str()));
       }
     }
   }
@@ -1613,26 +1617,28 @@
       if (line_num > 0) {
         PROFILE(isolate_,
                 CodeCreateEvent(
-                    Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
+                    Logger::ToNativeByScript(
+                        CodeEventListener::LAZY_COMPILE_TAG, *script),
                     *code, *shared, *script_name, line_num, column_num));
       } else {
         // Can't distinguish eval and script here, so always use Script.
-        PROFILE(isolate_, CodeCreateEvent(Logger::ToNativeByScript(
-                                              Logger::SCRIPT_TAG, *script),
-                                          *code, *shared, *script_name));
+        PROFILE(isolate_,
+                CodeCreateEvent(Logger::ToNativeByScript(
+                                    CodeEventListener::SCRIPT_TAG, *script),
+                                *code, *shared, *script_name));
       }
     } else {
       PROFILE(isolate_,
-              CodeCreateEvent(
-                  Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
-                  *code, *shared, isolate_->heap()->empty_string(), line_num,
-                  column_num));
+              CodeCreateEvent(Logger::ToNativeByScript(
+                                  CodeEventListener::LAZY_COMPILE_TAG, *script),
+                              *code, *shared, isolate_->heap()->empty_string(),
+                              line_num, column_num));
     }
   } else if (shared->IsApiFunction()) {
     // API function.
     FunctionTemplateInfo* fun_data = shared->get_api_func_data();
     Object* raw_call_data = fun_data->call_code();
-    if (!raw_call_data->IsUndefined()) {
+    if (!raw_call_data->IsUndefined(isolate_)) {
       CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
       Object* callback_obj = call_data->callback();
       Address entry_point = v8::ToCData<Address>(callback_obj);
@@ -1642,8 +1648,8 @@
       PROFILE(isolate_, CallbackEvent(*func_name, entry_point));
     }
   } else {
-    PROFILE(isolate_, CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *shared,
-                                      *func_name));
+    PROFILE(isolate_, CodeCreateEvent(CodeEventListener::LAZY_COMPILE_TAG,
+                                      *code, *shared, *func_name));
   }
 }
 
@@ -1756,7 +1762,6 @@
   PrepareLogFileName(log_file_name, isolate, FLAG_logfile);
   log_->Initialize(log_file_name.str().c_str());
 
-
   if (FLAG_perf_basic_prof) {
     perf_basic_logger_ = new PerfBasicLogger();
     addCodeEventListener(perf_basic_logger_);
@@ -1786,6 +1791,12 @@
     profiler_->Engage();
   }
 
+  profiler_listener_.reset();
+
+  if (is_logging_) {
+    addCodeEventListener(this);
+  }
+
   return true;
 }
 
@@ -1809,8 +1820,20 @@
   }
 }
 
+void Logger::SetUpProfilerListener() {
+  if (!is_initialized_) return;
+  if (profiler_listener_.get() == nullptr) {
+    profiler_listener_.reset(new ProfilerListener(isolate_));
+  }
+  addCodeEventListener(profiler_listener_.get());
+}
 
-Sampler* Logger::sampler() {
+void Logger::TearDownProfilerListener() {
+  if (profiler_listener_->HasObservers()) return;
+  removeCodeEventListener(profiler_listener_.get());
+}
+
+sampler::Sampler* Logger::sampler() {
   return ticker_;
 }
 
@@ -1853,6 +1876,10 @@
     jit_logger_ = NULL;
   }
 
+  if (profiler_listener_.get() != nullptr) {
+    removeCodeEventListener(profiler_listener_.get());
+  }
+
   return log_->Close();
 }
 
diff --git a/src/log.h b/src/log.h
index 9953b4c..303e352 100644
--- a/src/log.h
+++ b/src/log.h
@@ -11,6 +11,8 @@
 #include "src/base/compiler-specific.h"
 #include "src/base/platform/elapsed-timer.h"
 #include "src/base/platform/platform.h"
+#include "src/code-events.h"
+#include "src/isolate.h"
 #include "src/objects.h"
 
 namespace v8 {
@@ -19,6 +21,10 @@
 class Semaphore;
 }
 
+namespace sampler {
+class Sampler;
+}
+
 namespace internal {
 
 // Logger is used for collecting logging information from V8 during
@@ -64,96 +70,31 @@
 class Profiler;
 class Ticker;
 struct TickSample;
+class RuntimeCallTimer;
 
 #undef LOG
-#define LOG(isolate, Call)                          \
-  do {                                              \
-    v8::internal::Logger* logger =                  \
-        (isolate)->logger();                        \
-    if (logger->is_logging())                       \
-      logger->Call;                                 \
+#define LOG(isolate, Call)                              \
+  do {                                                  \
+    v8::internal::Logger* logger = (isolate)->logger(); \
+    if (logger->is_logging()) logger->Call;             \
   } while (false)
 
-#define LOG_CODE_EVENT(isolate, Call)               \
-  do {                                              \
-    v8::internal::Logger* logger =                  \
-        (isolate)->logger();                        \
-    if (logger->is_logging_code_events())           \
-      logger->Call;                                 \
+#define LOG_CODE_EVENT(isolate, Call)                   \
+  do {                                                  \
+    v8::internal::Logger* logger = (isolate)->logger(); \
+    if (logger->is_logging_code_events()) logger->Call; \
   } while (false)
 
-#define LOG_EVENTS_AND_TAGS_LIST(V)                                      \
-  V(CODE_CREATION_EVENT, "code-creation")                                \
-  V(CODE_DISABLE_OPT_EVENT, "code-disable-optimization")                 \
-  V(CODE_MOVE_EVENT, "code-move")                                        \
-  V(CODE_DELETE_EVENT, "code-delete")                                    \
-  V(CODE_MOVING_GC, "code-moving-gc")                                    \
-  V(SHARED_FUNC_MOVE_EVENT, "sfi-move")                                  \
-  V(SNAPSHOT_CODE_NAME_EVENT, "snapshot-code-name")                      \
-  V(TICK_EVENT, "tick")                                                  \
-  V(REPEAT_META_EVENT, "repeat")                                         \
-  V(BUILTIN_TAG, "Builtin")                                              \
-  V(CALL_DEBUG_BREAK_TAG, "CallDebugBreak")                              \
-  V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn")            \
-  V(CALL_INITIALIZE_TAG, "CallInitialize")                               \
-  V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic")                             \
-  V(CALL_MISS_TAG, "CallMiss")                                           \
-  V(CALL_NORMAL_TAG, "CallNormal")                                       \
-  V(LOAD_INITIALIZE_TAG, "LoadInitialize")                               \
-  V(LOAD_MEGAMORPHIC_TAG, "LoadMegamorphic")                             \
-  V(STORE_INITIALIZE_TAG, "StoreInitialize")                             \
-  V(STORE_GENERIC_TAG, "StoreGeneric")                                   \
-  V(STORE_MEGAMORPHIC_TAG, "StoreMegamorphic")                           \
-  V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak")                   \
-  V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG, "KeyedCallDebugPrepareStepIn") \
-  V(KEYED_CALL_INITIALIZE_TAG, "KeyedCallInitialize")                    \
-  V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic")                  \
-  V(KEYED_CALL_MISS_TAG, "KeyedCallMiss")                                \
-  V(KEYED_CALL_NORMAL_TAG, "KeyedCallNormal")                            \
-  V(CALLBACK_TAG, "Callback")                                            \
-  V(EVAL_TAG, "Eval")                                                    \
-  V(FUNCTION_TAG, "Function")                                            \
-  V(HANDLER_TAG, "Handler")                                              \
-  V(BYTECODE_HANDLER_TAG, "BytecodeHandler")                             \
-  V(KEYED_LOAD_IC_TAG, "KeyedLoadIC")                                    \
-  V(KEYED_LOAD_POLYMORPHIC_IC_TAG, "KeyedLoadPolymorphicIC")             \
-  V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC")        \
-  V(KEYED_STORE_IC_TAG, "KeyedStoreIC")                                  \
-  V(KEYED_STORE_POLYMORPHIC_IC_TAG, "KeyedStorePolymorphicIC")           \
-  V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC")      \
-  V(LAZY_COMPILE_TAG, "LazyCompile")                                     \
-  V(CALL_IC_TAG, "CallIC")                                               \
-  V(LOAD_IC_TAG, "LoadIC")                                               \
-  V(LOAD_POLYMORPHIC_IC_TAG, "LoadPolymorphicIC")                        \
-  V(REG_EXP_TAG, "RegExp")                                               \
-  V(SCRIPT_TAG, "Script")                                                \
-  V(STORE_IC_TAG, "StoreIC")                                             \
-  V(STORE_POLYMORPHIC_IC_TAG, "StorePolymorphicIC")                      \
-  V(STUB_TAG, "Stub")                                                    \
-  V(NATIVE_FUNCTION_TAG, "Function")                                     \
-  V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile")                              \
-  V(NATIVE_SCRIPT_TAG, "Script")
-// Note that 'NATIVE_' cases for functions and scripts are mapped onto
-// original tags when writing to the log.
-
-
 class JitLogger;
 class PerfBasicLogger;
 class LowLevelLogger;
 class PerfJitLogger;
-class Sampler;
+class ProfilerListener;
 
-class Logger {
+class Logger : public CodeEventListener {
  public:
   enum StartEnd { START = 0, END = 1 };
 
-#define DECLARE_ENUM(enum_item, ignore) enum_item,
-  enum LogEventsAndTags {
-    LOG_EVENTS_AND_TAGS_LIST(DECLARE_ENUM)
-    NUMBER_OF_LOG_EVENTS
-  };
-#undef DECLARE_ENUM
-
   // Acquires resources for logging if the right flags are set.
   bool SetUp(Isolate* isolate);
 
@@ -161,7 +102,15 @@
   void SetCodeEventHandler(uint32_t options,
                            JitCodeEventHandler event_handler);
 
-  Sampler* sampler();
+  // Sets up ProfilerListener.
+  void SetUpProfilerListener();
+
+  // Tear down ProfilerListener if it has no observers.
+  void TearDownProfilerListener();
+
+  sampler::Sampler* sampler();
+
+  ProfilerListener* profiler_listener() { return profiler_listener_.get(); }
 
   // Frees resources acquired in SetUp.
   // When a temporary file is used for the log, returns its stream descriptor,
@@ -207,28 +156,27 @@
   void ApiObjectAccess(const char* tag, JSObject* obj);
   void ApiEntryCall(const char* name);
 
-
   // ==== Events logged by --log-code. ====
   void addCodeEventListener(CodeEventListener* listener);
   void removeCodeEventListener(CodeEventListener* listener);
-  bool hasCodeEventListener(CodeEventListener* listener);
-
 
   // Emits a code event for a callback function.
   void CallbackEvent(Name* name, Address entry_point);
   void GetterCallbackEvent(Name* name, Address entry_point);
   void SetterCallbackEvent(Name* name, Address entry_point);
   // Emits a code create event.
-  void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
-                       const char* source);
-  void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code, Name* name);
-  void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
-                       SharedFunctionInfo* shared, Name* name);
-  void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
-                       SharedFunctionInfo* shared, Name* source, int line,
-                       int column);
-  void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
-                       int args_count);
+  void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+                       AbstractCode* code, const char* source);
+  void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+                       AbstractCode* code, Name* name);
+  void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+                       AbstractCode* code, SharedFunctionInfo* shared,
+                       Name* name);
+  void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+                       AbstractCode* code, SharedFunctionInfo* shared,
+                       Name* source, int line, int column);
+  void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+                       AbstractCode* code, int args_count);
   // Emits a code deoptimization event.
   void CodeDisableOptEvent(AbstractCode* code, SharedFunctionInfo* shared);
   void CodeMovingGCEvent();
@@ -255,6 +203,8 @@
 
   void CodeNameEvent(Address addr, int pos, const char* code_name);
 
+  void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta);
+
   // ==== Events logged by --log-gc. ====
   // Heap sampling events: start, end, and individual types.
   void HeapSampleBeginEvent(const char* space, const char* kind);
@@ -272,7 +222,6 @@
   void SharedLibraryEvent(const std::string& library_path, uintptr_t start,
                           uintptr_t end, intptr_t aslr_slide);
 
-  void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta);
   void CurrentTimeEvent();
 
   void TimerEvent(StartEnd se, const char* name);
@@ -314,7 +263,8 @@
   void LogBytecodeHandlers();
 
   // Converts tag to a corresponding NATIVE_... if the script is native.
-  INLINE(static LogEventsAndTags ToNativeByScript(LogEventsAndTags, Script*));
+  INLINE(static CodeEventListener::LogEventsAndTags ToNativeByScript(
+      CodeEventListener::LogEventsAndTags, Script*));
 
   // Profiler's sampling interval (in milliseconds).
 #if defined(ANDROID)
@@ -341,16 +291,18 @@
                              Address entry_point);
 
   // Internal configurable move event.
-  void MoveEventInternal(LogEventsAndTags event, Address from, Address to);
+  void MoveEventInternal(CodeEventListener::LogEventsAndTags event,
+                         Address from, Address to);
 
   // Used for logging stubs found in the snapshot.
   void LogCodeObject(Object* code_object);
 
   // Helper method. It resets name_buffer_ and add tag name into it.
-  void InitNameBuffer(LogEventsAndTags tag);
+  void InitNameBuffer(CodeEventListener::LogEventsAndTags tag);
 
   // Emits a profiler tick event. Used by the profiler thread.
   void TickEvent(TickSample* sample, bool overflow);
+  void RuntimeCallTimerEvent();
 
   PRINTF_FORMAT(2, 3) void ApiEvent(const char* format, ...);
 
@@ -389,6 +341,7 @@
   PerfJitLogger* perf_jit_logger_;
   LowLevelLogger* ll_logger_;
   JitLogger* jit_logger_;
+  std::unique_ptr<ProfilerListener> profiler_listener_;
   List<CodeEventListener*> listeners_;
 
   // Guards against multiple calls to TearDown() that can happen in some tests.
@@ -458,47 +411,20 @@
   DISALLOW_COPY_AND_ASSIGN(PositionsRecorder);
 };
 
-class CodeEventListener {
- public:
-  virtual ~CodeEventListener() {}
-
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
-                               const char* comment) = 0;
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
-                               Name* name) = 0;
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
-                               SharedFunctionInfo* shared, Name* name) = 0;
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
-                               SharedFunctionInfo* shared, Name* source,
-                               int line, int column) = 0;
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
-                               int args_count) = 0;
-  virtual void CallbackEvent(Name* name, Address entry_point) = 0;
-  virtual void GetterCallbackEvent(Name* name, Address entry_point) = 0;
-  virtual void SetterCallbackEvent(Name* name, Address entry_point) = 0;
-  virtual void RegExpCodeCreateEvent(AbstractCode* code, String* source) = 0;
-  virtual void CodeMoveEvent(AbstractCode* from, Address to) = 0;
-  virtual void SharedFunctionInfoMoveEvent(Address from, Address to) = 0;
-  virtual void CodeMovingGCEvent() = 0;
-  virtual void CodeDisableOptEvent(AbstractCode* code,
-                                   SharedFunctionInfo* shared) = 0;
-};
-
-
 class CodeEventLogger : public CodeEventListener {
  public:
   CodeEventLogger();
   ~CodeEventLogger() override;
 
-  void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+  void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
                        const char* comment) override;
-  void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+  void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
                        Name* name) override;
-  void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+  void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
                        int args_count) override;
-  void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+  void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
                        SharedFunctionInfo* shared, Name* name) override;
-  void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+  void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
                        SharedFunctionInfo* shared, Name* source, int line,
                        int column) override;
   void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
@@ -508,6 +434,7 @@
   void SetterCallbackEvent(Name* name, Address entry_point) override {}
   void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
   void CodeMovingGCEvent() override {}
+  void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta) override {}
 
  private:
   class NameBuffer;
diff --git a/src/lookup.cc b/src/lookup.cc
index bdb9f0a..8e545f7 100644
--- a/src/lookup.cc
+++ b/src/lookup.cc
@@ -131,7 +131,7 @@
     return result;
   }
   auto root = handle(receiver->GetRootMap(isolate)->prototype(), isolate);
-  if (root->IsNull()) {
+  if (root->IsNull(isolate)) {
     unsigned int magic = 0xbbbbbbbb;
     isolate->PushStackTraceAndDie(magic, *receiver, NULL, magic);
   }
@@ -299,7 +299,7 @@
       // Install a property cell.
       auto cell = JSGlobalObject::EnsurePropertyCell(
           Handle<JSGlobalObject>::cast(receiver), name());
-      DCHECK(cell->value()->IsTheHole());
+      DCHECK(cell->value()->IsTheHole(isolate_));
       transition_ = cell;
     } else {
       transition_ = map;
@@ -373,7 +373,7 @@
 void LookupIterator::TransitionToAccessorProperty(
     Handle<Object> getter, Handle<Object> setter,
     PropertyAttributes attributes) {
-  DCHECK(!getter->IsNull() || !setter->IsNull());
+  DCHECK(!getter->IsNull(isolate_) || !setter->IsNull(isolate_));
   // Can only be called when the receiver is a JSObject. JSProxy has to be
   // handled via a trap. Adding properties to primitive values is not
   // observable.
@@ -496,8 +496,7 @@
   if (!current->map()->has_hidden_prototype()) return false;
   // JSProxy do not occur as hidden prototypes.
   if (object->IsJSProxy()) return false;
-  PrototypeIterator iter(isolate(), current,
-                         PrototypeIterator::START_AT_PROTOTYPE,
+  PrototypeIterator iter(isolate(), current, kStartAtPrototype,
                          PrototypeIterator::END_AT_NON_HIDDEN);
   while (!iter.IsAtEnd()) {
     if (iter.GetCurrent<JSReceiver>() == object) return true;
@@ -693,7 +692,7 @@
         number_ = static_cast<uint32_t>(number);
         DCHECK(dict->ValueAt(number_)->IsPropertyCell());
         PropertyCell* cell = PropertyCell::cast(dict->ValueAt(number_));
-        if (cell->value()->IsTheHole()) return NOT_FOUND;
+        if (cell->value()->IsTheHole(isolate_)) return NOT_FOUND;
         property_details_ = cell->property_details();
         has_property_ = true;
         switch (property_details_.kind()) {
@@ -758,5 +757,21 @@
   return state_;
 }
 
+Handle<InterceptorInfo> LookupIterator::GetInterceptorForFailedAccessCheck()
+    const {
+  DCHECK_EQ(ACCESS_CHECK, state_);
+  DisallowHeapAllocation no_gc;
+  AccessCheckInfo* access_check_info =
+      AccessCheckInfo::Get(isolate_, Handle<JSObject>::cast(holder_));
+  if (access_check_info) {
+    Object* interceptor = IsElement() ? access_check_info->indexed_interceptor()
+                                      : access_check_info->named_interceptor();
+    if (interceptor) {
+      return handle(InterceptorInfo::cast(interceptor), isolate_);
+    }
+  }
+  return Handle<InterceptorInfo>();
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/lookup.h b/src/lookup.h
index bb65639..f1a1b7c 100644
--- a/src/lookup.h
+++ b/src/lookup.h
@@ -176,6 +176,7 @@
   Handle<Object> GetReceiver() const { return receiver_; }
 
   Handle<JSObject> GetStoreTarget() const {
+    DCHECK(receiver_->IsJSObject());
     if (receiver_->IsJSGlobalProxy()) {
       Map* map = JSGlobalProxy::cast(*receiver_)->map();
       if (map->has_hidden_prototype()) {
@@ -257,15 +258,15 @@
                     : GetInterceptor<false>(JSObject::cast(*holder_));
     return handle(result, isolate_);
   }
+  Handle<InterceptorInfo> GetInterceptorForFailedAccessCheck() const;
   Handle<Object> GetDataValue() const;
   void WriteDataValue(Handle<Object> value);
   inline void UpdateProtector() {
     if (IsElement()) return;
     if (*name_ == heap()->is_concat_spreadable_symbol() ||
-        (FLAG_harmony_species && (*name_ == heap()->constructor_string() ||
-                                  *name_ == heap()->species_symbol())) ||
-        (FLAG_harmony_instanceof &&
-         (*name_ == heap()->has_instance_symbol()))) {
+        *name_ == heap()->constructor_string() ||
+        *name_ == heap()->species_symbol() ||
+        *name_ == heap()->has_instance_symbol()) {
       InternalUpdateProtector();
     }
   }
diff --git a/src/messages.cc b/src/messages.cc
index 6e7c495..0dffbb4 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -98,7 +98,7 @@
     MaybeHandle<Object> maybe_stringified;
     Handle<Object> stringified;
     // Make sure we don't leak uncaught internally generated Error objects.
-    if (Object::IsErrorObject(isolate, argument)) {
+    if (argument->IsJSError()) {
       Handle<Object> args[] = {argument};
       maybe_stringified = Execution::TryCall(
           isolate, isolate->no_side_effects_to_string_fun(),
@@ -130,7 +130,7 @@
   } else {
     for (int i = 0; i < global_length; i++) {
       HandleScope scope(isolate);
-      if (global_listeners.get(i)->IsUndefined()) continue;
+      if (global_listeners.get(i)->IsUndefined(isolate)) continue;
       v8::NeanderObject listener(JSObject::cast(global_listeners.get(i)));
       Handle<Foreign> callback_obj(Foreign::cast(listener.get(0)));
       v8::MessageCallback callback =
@@ -139,7 +139,7 @@
       {
         // Do not allow exceptions to propagate.
         v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
-        callback(api_message_obj, callback_data->IsUndefined()
+        callback(api_message_obj, callback_data->IsUndefined(isolate)
                                       ? api_exception_obj
                                       : v8::Utils::ToLocal(callback_data));
       }
@@ -205,12 +205,8 @@
 
 Handle<Object> CallSite::GetFunctionName() {
   if (IsWasm()) {
-    if (wasm_obj_->IsUndefined()) return isolate_->factory()->null_value();
-    // wasm_obj_ can be a String if we generate WASM code directly in a test
-    // case.
-    if (wasm_obj_->IsString()) return wasm_obj_;
-    return wasm::GetWasmFunctionName(Handle<JSObject>::cast(wasm_obj_),
-                                     wasm_func_index_);
+    return wasm::GetWasmFunctionNameOrNull(isolate_, wasm_obj_,
+                                           wasm_func_index_);
   }
   Handle<String> result = JSFunction::GetName(fun_);
   if (result->length() != 0) return result;
@@ -253,7 +249,8 @@
 
 
 Handle<Object> CallSite::GetMethodName() {
-  if (!IsJavaScript() || receiver_->IsNull() || receiver_->IsUndefined()) {
+  if (!IsJavaScript() || receiver_->IsNull(isolate_) ||
+      receiver_->IsUndefined(isolate_)) {
     return isolate_->factory()->null_value();
   }
   Handle<JSReceiver> receiver =
@@ -268,13 +265,11 @@
     Handle<Name> name = Handle<Name>::cast(function_name);
     // ES2015 gives getters and setters name prefixes which must
     // be stripped to find the property name.
-    if (name->IsString() && FLAG_harmony_function_name) {
-      Handle<String> name_string = Handle<String>::cast(name);
-      if (name_string->IsUtf8EqualTo(CStrVector("get "), true) ||
-          name_string->IsUtf8EqualTo(CStrVector("set "), true)) {
-        name = isolate_->factory()->NewProperSubString(name_string, 4,
-                                                       name_string->length());
-      }
+    Handle<String> name_string = Handle<String>::cast(name);
+    if (name_string->IsUtf8EqualTo(CStrVector("get "), true) ||
+        name_string->IsUtf8EqualTo(CStrVector("set "), true)) {
+      name = isolate_->factory()->NewProperSubString(name_string, 4,
+                                                     name_string->length());
     }
     if (CheckMethodName(isolate_, obj, name, fun_,
                         LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR)) {
@@ -284,9 +279,8 @@
 
   HandleScope outer_scope(isolate_);
   Handle<Object> result;
-  for (PrototypeIterator iter(isolate_, obj,
-                              PrototypeIterator::START_AT_RECEIVER);
-       !iter.IsAtEnd(); iter.Advance()) {
+  for (PrototypeIterator iter(isolate_, obj, kStartAtReceiver); !iter.IsAtEnd();
+       iter.Advance()) {
     Handle<Object> current = PrototypeIterator::GetCurrent(iter);
     if (!current->IsJSObject()) break;
     Handle<JSObject> current_obj = Handle<JSObject>::cast(current);
@@ -345,8 +339,8 @@
 
 bool CallSite::IsToplevel() {
   if (IsWasm()) return false;
-  return receiver_->IsJSGlobalProxy() || receiver_->IsNull() ||
-         receiver_->IsUndefined();
+  return receiver_->IsJSGlobalProxy() || receiver_->IsNull(isolate_) ||
+         receiver_->IsUndefined(isolate_);
 }
 
 
diff --git a/src/messages.h b/src/messages.h
index a9f321e..682105d 100644
--- a/src/messages.h
+++ b/src/messages.h
@@ -97,7 +97,8 @@
   T(CalledOnNonObject, "% called on non-object")                               \
   T(CalledOnNullOrUndefined, "% called on null or undefined")                  \
   T(CallSiteExpectsFunction,                                                   \
-    "CallSite expects function or number as second argument, got %")           \
+    "CallSite expects wasm object as first or function as second argument, "   \
+    "got <%, %>")                                                              \
   T(CallSiteMethod, "CallSite method % expects CallSite as receiver")          \
   T(CannotConvertToPrimitive, "Cannot convert object to primitive value")      \
   T(CannotPreventExt, "Cannot prevent extensions")                             \
@@ -120,10 +121,8 @@
   T(DefineDisallowed, "Cannot define property:%, object is not extensible.")   \
   T(DetachedOperation, "Cannot perform % on a detached ArrayBuffer")           \
   T(DuplicateTemplateProperty, "Object template has duplicate property '%'")   \
-  T(ExtendsValueGenerator,                                                     \
-    "Class extends value % may not be a generator function")                   \
-  T(ExtendsValueNotFunction,                                                   \
-    "Class extends value % is not a function or null")                         \
+  T(ExtendsValueNotConstructor,                                                \
+    "Class extends value % is not a constructor or null")                      \
   T(FirstArgumentNotRegExp,                                                    \
     "First argument to % must not be a regular expression")                    \
   T(FunctionBind, "Bind must be called on a function")                         \
@@ -339,6 +338,8 @@
   T(InvalidTypedArrayAlignment, "% of % should be a multiple of %")            \
   T(InvalidTypedArrayLength, "Invalid typed array length")                     \
   T(InvalidTypedArrayOffset, "Start offset is too large:")                     \
+  T(InvalidSimdIndex, "Index out of bounds for SIMD operation")                \
+  T(InvalidSimdLaneValue, "Lane value out of bounds for SIMD operation")       \
   T(LetInLexicalBinding, "let is disallowed as a lexically bound name")        \
   T(LocaleMatcher, "Illegal value for localeMatcher:%")                        \
   T(NormalizationForm, "The normalization form should be one of %.")           \
@@ -439,6 +440,8 @@
     "Too many arguments in function call (only 65535 allowed)")                \
   T(TooManyParameters,                                                         \
     "Too many parameters in function definition (only 65535 allowed)")         \
+  T(TooManySpreads,                                                            \
+    "Literal containing too many nested spreads (up to 65534 allowed)")        \
   T(TooManyVariables, "Too many variables declared (only 4194303 allowed)")    \
   T(TypedArrayTooShort,                                                        \
     "Derived TypedArray constructor created an array which was too small")     \
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index b463c0b..57aea38 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -463,6 +463,8 @@
   CheckTrampolinePoolQuick();
 }
 
+template <>
+inline void Assembler::EmitHelper(uint8_t x);
 
 template <typename T>
 void Assembler::EmitHelper(T x) {
@@ -471,6 +473,14 @@
   CheckTrampolinePoolQuick();
 }
 
+template <>
+void Assembler::EmitHelper(uint8_t x) {
+  *reinterpret_cast<uint8_t*>(pc_) = x;
+  pc_ += sizeof(x);
+  if (reinterpret_cast<intptr_t>(pc_) % kInstrSize == 0) {
+    CheckTrampolinePoolQuick();
+  }
+}
 
 void Assembler::emit(Instr x, CompactBranchType is_compact_branch) {
   if (!is_buffer_growth_blocked()) {
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index f95323b..8bda17c 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -194,36 +194,25 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
+Address RelocInfo::wasm_global_reference() {
+  DCHECK(IsWasmGlobalReference(rmode_));
+  return Assembler::target_address_at(pc_, host_);
+}
+
 uint32_t RelocInfo::wasm_memory_size_reference() {
   DCHECK(IsWasmMemorySizeReference(rmode_));
   return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
 }
 
-void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
-    ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
-  if (IsWasmMemoryReference(rmode_)) {
-    Address updated_memory_reference;
-    DCHECK(old_base <= wasm_memory_reference() &&
-           wasm_memory_reference() < old_base + old_size);
-    updated_memory_reference = new_base + (wasm_memory_reference() - old_base);
-    DCHECK(new_base <= updated_memory_reference &&
-           updated_memory_reference < new_base + new_size);
-    Assembler::set_target_address_at(
-        isolate_, pc_, host_, updated_memory_reference, icache_flush_mode);
-  } else if (IsWasmMemorySizeReference(rmode_)) {
-    uint32_t updated_size_reference;
-    DCHECK(wasm_memory_size_reference() <= old_size);
-    updated_size_reference =
-        new_size + (wasm_memory_size_reference() - old_size);
-    DCHECK(updated_size_reference <= new_size);
-    Assembler::set_target_address_at(
-        isolate_, pc_, host_, reinterpret_cast<Address>(updated_size_reference),
-        icache_flush_mode);
-  } else {
-    UNREACHABLE();
-  }
+void RelocInfo::unchecked_update_wasm_memory_reference(
+    Address address, ICacheFlushMode flush_mode) {
+  Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
+}
+
+void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
+                                                  ICacheFlushMode flush_mode) {
+  Assembler::set_target_address_at(isolate_, pc_, host_,
+                                   reinterpret_cast<Address>(size), flush_mode);
 }
 
 // -----------------------------------------------------------------------------
@@ -330,6 +319,8 @@
   desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
   desc->origin = this;
   desc->constant_pool_size = 0;
+  desc->unwinding_info_size = 0;
+  desc->unwinding_info = nullptr;
 }
 
 
@@ -1277,7 +1268,6 @@
 
 
 void Assembler::bal(int16_t offset) {
-  positions_recorder()->WriteRecordedPositions();
   bgezal(zero_reg, offset);
 }
 
@@ -1290,7 +1280,6 @@
 
 void Assembler::balc(int32_t offset) {
   DCHECK(IsMipsArchVariant(kMips32r6));
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
 }
 
@@ -1337,7 +1326,6 @@
 void Assembler::bgezal(Register rs, int16_t offset) {
   DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
   BlockTrampolinePoolScope block_trampoline_pool(this);
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
   BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
@@ -1408,7 +1396,6 @@
 void Assembler::bltzal(Register rs, int16_t offset) {
   DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
   BlockTrampolinePoolScope block_trampoline_pool(this);
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
   BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
@@ -1444,7 +1431,6 @@
 void Assembler::blezalc(Register rt, int16_t offset) {
   DCHECK(IsMipsArchVariant(kMips32r6));
   DCHECK(!(rt.is(zero_reg)));
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(BLEZ, zero_reg, rt, offset,
                     CompactBranchType::COMPACT_BRANCH);
 }
@@ -1453,7 +1439,6 @@
 void Assembler::bgezalc(Register rt, int16_t offset) {
   DCHECK(IsMipsArchVariant(kMips32r6));
   DCHECK(!(rt.is(zero_reg)));
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
 }
 
@@ -1462,7 +1447,6 @@
   DCHECK(!IsMipsArchVariant(kMips32r6));
   DCHECK(!(rs.is(zero_reg)));
   BlockTrampolinePoolScope block_trampoline_pool(this);
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
   BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
@@ -1471,7 +1455,6 @@
 void Assembler::bltzalc(Register rt, int16_t offset) {
   DCHECK(IsMipsArchVariant(kMips32r6));
   DCHECK(!(rt.is(zero_reg)));
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
 }
 
@@ -1479,7 +1462,6 @@
 void Assembler::bgtzalc(Register rt, int16_t offset) {
   DCHECK(IsMipsArchVariant(kMips32r6));
   DCHECK(!(rt.is(zero_reg)));
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(BGTZ, zero_reg, rt, offset,
                     CompactBranchType::COMPACT_BRANCH);
 }
@@ -1488,7 +1470,6 @@
 void Assembler::beqzalc(Register rt, int16_t offset) {
   DCHECK(IsMipsArchVariant(kMips32r6));
   DCHECK(!(rt.is(zero_reg)));
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(ADDI, zero_reg, rt, offset,
                     CompactBranchType::COMPACT_BRANCH);
 }
@@ -1497,7 +1478,6 @@
 void Assembler::bnezalc(Register rt, int16_t offset) {
   DCHECK(IsMipsArchVariant(kMips32r6));
   DCHECK(!(rt.is(zero_reg)));
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(DADDI, zero_reg, rt, offset,
                     CompactBranchType::COMPACT_BRANCH);
 }
@@ -1556,9 +1536,6 @@
 void Assembler::jr(Register rs) {
   if (!IsMipsArchVariant(kMips32r6)) {
     BlockTrampolinePoolScope block_trampoline_pool(this);
-    if (rs.is(ra)) {
-      positions_recorder()->WriteRecordedPositions();
-    }
     GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
     BlockTrampolinePoolFor(1);  // For associated delay slot.
   } else {
@@ -1576,7 +1553,6 @@
   DCHECK(in_range && ((target & 3) == 0));
 #endif
   BlockTrampolinePoolScope block_trampoline_pool(this);
-  positions_recorder()->WriteRecordedPositions();
   GenInstrJump(JAL, (target >> 2) & kImm26Mask);
   BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
@@ -1585,7 +1561,6 @@
 void Assembler::jalr(Register rs, Register rd) {
   DCHECK(rs.code() != rd.code());
   BlockTrampolinePoolScope block_trampoline_pool(this);
-  positions_recorder()->WriteRecordedPositions();
   GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
   BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
@@ -1599,7 +1574,6 @@
 
 void Assembler::jialc(Register rt, int16_t offset) {
   DCHECK(IsMipsArchVariant(kMips32r6));
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(POP76, zero_reg, rt, offset);
 }
 
@@ -1940,7 +1914,6 @@
   GenInstrImmediate(LUI, rs, rt, j);
 }
 
-
 // ---------PC-Relative instructions-----------
 
 void Assembler::addiupc(Register rs, int32_t imm19) {
@@ -2175,6 +2148,21 @@
   GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
 }
 
+// Byte swap.
+void Assembler::wsbh(Register rd, Register rt) {
+  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, WSBH, BSHFL);
+}
+
+void Assembler::seh(Register rd, Register rt) {
+  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEH, BSHFL);
+}
+
+void Assembler::seb(Register rd, Register rt) {
+  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL);
+}
 
 // --------Coprocessor-instructions----------------
 
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index c595cc9..8f4f9d9 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -125,8 +125,6 @@
     Register r = {code};
     return r;
   }
-  const char* ToString();
-  bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
   bool is(Register reg) const { return reg_code == reg.reg_code; }
   int code() const {
@@ -155,6 +153,8 @@
 
 Register ToRegister(int num);
 
+static const bool kSimpleFPAliasing = true;
+
 // Coprocessor register.
 struct FPURegister {
   enum Code {
@@ -173,8 +173,6 @@
   // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
   // number of Double regs (64-bit regs, or FPU-reg-pairs).
 
-  const char* ToString();
-  bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
   bool is(FPURegister reg) const { return reg_code == reg.reg_code; }
   FPURegister low() const {
@@ -850,6 +848,10 @@
   void bitswap(Register rd, Register rt);
   void align(Register rd, Register rs, Register rt, uint8_t bp);
 
+  void wsbh(Register rd, Register rt);
+  void seh(Register rd, Register rt);
+  void seb(Register rd, Register rt);
+
   // --------Coprocessor-instructions----------------
 
   // Load, store, and move.
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 76d0640..7d9d080 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -17,10 +17,7 @@
 
 #define __ ACCESS_MASM(masm)
 
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
-                                CFunctionId id,
-                                BuiltinExtraArguments extra_args) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
   // ----------- S t a t e -------------
   //  -- a0                 : number of arguments excluding receiver
   //  -- a1                 : target
@@ -39,23 +36,8 @@
   __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
 
   // Insert extra arguments.
-  int num_extra_args = 0;
-  switch (extra_args) {
-    case BuiltinExtraArguments::kTarget:
-      __ Push(a1);
-      ++num_extra_args;
-      break;
-    case BuiltinExtraArguments::kNewTarget:
-      __ Push(a3);
-      ++num_extra_args;
-      break;
-    case BuiltinExtraArguments::kTargetAndNewTarget:
-      __ Push(a1, a3);
-      num_extra_args += 2;
-      break;
-    case BuiltinExtraArguments::kNone:
-      break;
-  }
+  const int num_extra_args = 2;
+  __ Push(a1, a3);
 
   // JumpToExternalReference expects a0 to contain the number of arguments
   // including the receiver and the extra arguments.
@@ -145,6 +127,8 @@
 void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
   // ----------- S t a t e -------------
   //  -- a0                 : number of arguments
+  //  -- a1                 : function
+  //  -- cp                 : context
   //  -- ra                 : return address
   //  -- sp[(argc - n) * 8] : arg[n] (zero-based)
   //  -- sp[(argc + 1) * 8] : receiver
@@ -154,9 +138,9 @@
                                      : Heap::kMinusInfinityValueRootIndex;
 
   // Load the accumulator with the default return value (either -Infinity or
-  // +Infinity), with the tagged value in a1 and the double value in f0.
-  __ LoadRoot(a1, root_index);
-  __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+  // +Infinity), with the tagged value in t2 and the double value in f0.
+  __ LoadRoot(t2, root_index);
+  __ ldc1(f0, FieldMemOperand(t2, HeapNumber::kValueOffset));
   __ Addu(a3, a0, Operand(1));
 
   Label done_loop, loop;
@@ -171,35 +155,39 @@
     __ lw(a2, MemOperand(at));
 
     // Load the double value of the parameter into f2, maybe converting the
-    // parameter to a number first using the ToNumberStub if necessary.
+    // parameter to a number first using the ToNumber builtin if necessary.
     Label convert, convert_smi, convert_number, done_convert;
     __ bind(&convert);
     __ JumpIfSmi(a2, &convert_smi);
     __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
     __ JumpIfRoot(t0, Heap::kHeapNumberMapRootIndex, &convert_number);
     {
-      // Parameter is not a Number, use the ToNumberStub to convert it.
-      FrameScope scope(masm, StackFrame::INTERNAL);
+      // Parameter is not a Number, use the ToNumber builtin to convert it.
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ Push(ra, fp);
+      __ Move(fp, sp);
+      __ Push(cp, a1);
       __ SmiTag(a0);
       __ SmiTag(a3);
-      __ Push(a0, a1, a3);
+      __ Push(a0, t2, a3);
       __ mov(a0, a2);
-      ToNumberStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
       __ mov(a2, v0);
-      __ Pop(a0, a1, a3);
+      __ Pop(a0, t2, a3);
       {
         // Restore the double accumulator value (f0).
         Label restore_smi, done_restore;
-        __ JumpIfSmi(a1, &restore_smi);
-        __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+        __ JumpIfSmi(t2, &restore_smi);
+        __ ldc1(f0, FieldMemOperand(t2, HeapNumber::kValueOffset));
         __ jmp(&done_restore);
         __ bind(&restore_smi);
-        __ SmiToDoubleFPURegister(a1, f0, t0);
+        __ SmiToDoubleFPURegister(t2, f0, t0);
         __ bind(&done_restore);
       }
       __ SmiUntag(a3);
       __ SmiUntag(a0);
+      __ Pop(cp, a1);
+      __ Pop(ra, fp);
     }
     __ jmp(&convert);
     __ bind(&convert_number);
@@ -227,20 +215,20 @@
     __ Branch(&set_value, ne, t1, Operand(t8));
     __ jmp(&loop);
     __ bind(&set_value);
-    __ mov(a1, a2);
+    __ mov(t2, a2);
     __ jmp(&loop);
 
     // At least one side is NaN, which means that the result will be NaN too.
     __ bind(&compare_nan);
-    __ LoadRoot(a1, Heap::kNanValueRootIndex);
-    __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+    __ LoadRoot(t2, Heap::kNanValueRootIndex);
+    __ ldc1(f0, FieldMemOperand(t2, HeapNumber::kValueOffset));
     __ jmp(&loop);
   }
 
   __ bind(&done_loop);
   __ Lsa(sp, sp, a3, kPointerSizeLog2);
   __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a1);  // In delay slot.
+  __ mov(v0, t2);  // In delay slot.
 }
 
 // static
@@ -265,8 +253,7 @@
   }
 
   // 2a. Convert first argument to number.
-  ToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
+  __ Jump(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
 
   // 2b. No arguments, return +0.
   __ bind(&no_arguments);
@@ -314,8 +301,7 @@
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
       __ Push(a1, a3);
-      ToNumberStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
       __ Move(a0, v0);
       __ Pop(a1, a3);
     }
@@ -829,8 +815,8 @@
   __ AssertGeneratorObject(a1);
 
   // Store input value into generator object.
-  __ sw(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOffset));
-  __ RecordWriteField(a1, JSGeneratorObject::kInputOffset, v0, a3,
+  __ sw(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
+  __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
                       kRAHasNotBeenSaved, kDontSaveFPRegs);
 
   // Store resume mode into generator object.
@@ -841,20 +827,22 @@
   __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
 
   // Flood function if we are stepping.
-  Label skip_flooding;
-  ExternalReference step_in_enabled =
-      ExternalReference::debug_step_in_enabled_address(masm->isolate());
-  __ li(t1, Operand(step_in_enabled));
+  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+  Label stepping_prepared;
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(masm->isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  __ li(t1, Operand(last_step_action));
   __ lb(t1, MemOperand(t1));
-  __ Branch(&skip_flooding, eq, t1, Operand(zero_reg));
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Push(a1, a2, t0);
-    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
-    __ Pop(a1, a2);
-    __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
-  }
-  __ bind(&skip_flooding);
+  __ Branch(&prepare_step_in_if_stepping, ge, t1, Operand(StepIn));
+
+  // Flood function if we need to continue stepping in the suspended generator.
+  ExternalReference debug_suspended_generator =
+      ExternalReference::debug_suspended_generator_address(masm->isolate());
+  __ li(t1, Operand(debug_suspended_generator));
+  __ lw(t1, MemOperand(t1));
+  __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(t1));
+  __ bind(&stepping_prepared);
 
   // Push receiver.
   __ lw(t1, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
@@ -950,6 +938,42 @@
     __ Move(v0, a1);  // Continuation expects generator object in v0.
     __ Jump(a3);
   }
+
+  __ bind(&prepare_step_in_if_stepping);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(a1, a2, t0);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(a1, a2);
+  }
+  __ Branch(USE_DELAY_SLOT, &stepping_prepared);
+  __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+
+  __ bind(&prepare_step_in_suspended_generator);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(a1, a2);
+    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+    __ Pop(a1, a2);
+  }
+  __ Branch(USE_DELAY_SLOT, &stepping_prepared);
+  __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+}
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
+  Register args_count = scratch;
+
+  // Get the arguments + receiver count.
+  __ lw(args_count,
+        MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ lw(args_count,
+        FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+  // Drop receiver + arguments.
+  __ Addu(sp, sp, args_count);
 }
 
 // Generate code for entering a JS function with the interpreter.
@@ -1054,16 +1078,7 @@
   masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
 
   // The return value is in v0.
-
-  // Get the arguments + reciever count.
-  __ lw(t0, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ lw(t0, FieldMemOperand(t0, BytecodeArray::kParameterSizeOffset));
-
-  // Leave the frame (also dropping the register file).
-  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
-
-  // Drop receiver + arguments and return.
-  __ Addu(sp, sp, t0);
+  LeaveInterpreterFrame(masm, t0);
   __ Jump(ra);
 
   // Load debug copy of the bytecode array.
@@ -1085,6 +1100,31 @@
   __ Jump(t0);
 }
 
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+  // Save the function and context for call to CompileBaseline.
+  __ lw(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+  __ lw(kContextRegister,
+        MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  // Leave the frame before recompiling for baseline so that we don't count as
+  // an activation on the stack.
+  LeaveInterpreterFrame(masm, t0);
+
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Push return value.
+    __ push(v0);
+
+    // Push function as argument and compile for baseline.
+    __ push(a1);
+    __ CallRuntime(Runtime::kCompileBaseline);
+
+    // Restore return value.
+    __ pop(v0);
+  }
+  __ Jump(ra);
+}
+
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode) {
@@ -1241,13 +1281,28 @@
                               SharedFunctionInfo::kOffsetToPreviousOsrAstId));
   const int bailout_id = BailoutId::None().ToInt();
   __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
+
   // Literals available?
+  Label got_literals, maybe_cleared_weakcell;
   __ lw(temp, FieldMemOperand(array_pointer,
                               SharedFunctionInfo::kOffsetToPreviousLiterals));
+  // temp contains either a WeakCell pointing to the literals array or the
+  // literals array directly.
+  STATIC_ASSERT(WeakCell::kValueOffset == FixedArray::kLengthOffset);
+  __ lw(t0, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(t0, &maybe_cleared_weakcell);
+  // t0 is a pointer, therefore temp is a WeakCell pointing to a literals array.
   __ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
-  __ JumpIfSmi(temp, &gotta_call_runtime);
+  __ jmp(&got_literals);
+
+  // t0 is a smi. If it's 0, then we are looking at a cleared WeakCell
+  // around the literals array, and we should visit the runtime. If it's > 0,
+  // then temp already contains the literals array.
+  __ bind(&maybe_cleared_weakcell);
+  __ Branch(&gotta_call_runtime, eq, t0, Operand(Smi::FromInt(0)));
 
   // Save the literals in the closure.
+  __ bind(&got_literals);
   __ lw(t0, MemOperand(sp, 0));
   __ sw(temp, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
   __ push(index);
@@ -1660,6 +1715,9 @@
 void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
                                                int field_index) {
   // ----------- S t a t e -------------
+  //  -- a0    : number of arguments
+  //  -- a1    : function
+  //  -- cp    : context
   //  -- sp[0] : receiver
   // -----------------------------------
 
@@ -1699,7 +1757,14 @@
 
   // 3. Raise a TypeError if the receiver is not a date.
   __ bind(&receiver_not_date);
-  __ TailCallRuntime(Runtime::kThrowNotDateError);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Push(a0, ra, fp);
+    __ Move(fp, sp);
+    __ Push(cp, a1);
+    __ Push(Smi::FromInt(0));
+    __ CallRuntime(Runtime::kThrowNotDateError);
+  }
 }
 
 // static
@@ -2697,6 +2762,83 @@
   __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
 }
 
+// static
+void Builtins::Generate_StringToNumber(MacroAssembler* masm) {
+  // The StringToNumber stub takes on argument in a0.
+  __ AssertString(a0);
+
+  // Check if string has a cached array index.
+  Label runtime;
+  __ lw(a2, FieldMemOperand(a0, String::kHashFieldOffset));
+  __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
+  __ Branch(&runtime, ne, at, Operand(zero_reg));
+  __ IndexFromHash(a2, v0);
+  __ Ret();
+
+  __ bind(&runtime);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    // Push argument.
+    __ Push(a0);
+    // We cannot use a tail call here because this builtin can also be called
+    // from wasm.
+    __ CallRuntime(Runtime::kStringToNumber);
+  }
+  __ Ret();
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in a0.
+  Label not_smi;
+  __ JumpIfNotSmi(a0, &not_smi);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+  __ bind(&not_smi);
+
+  Label not_heap_number;
+  __ GetObjectType(a0, a1, a1);
+  // a0: receiver
+  // a1: receiver instance type
+  __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+  __ bind(&not_heap_number);
+
+  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+          RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_NonNumberToNumber(MacroAssembler* masm) {
+  // The NonNumberToNumber stub takes on argument in a0.
+  __ AssertNotNumber(a0);
+
+  Label not_string;
+  __ GetObjectType(a0, a1, a1);
+  // a0: receiver
+  // a1: receiver instance type
+  __ Branch(&not_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
+  __ Jump(masm->isolate()->builtins()->StringToNumber(),
+          RelocInfo::CODE_TARGET);
+  __ bind(&not_string);
+
+  Label not_oddball;
+  __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
+  __ Ret(USE_DELAY_SLOT);
+  __ lw(v0, FieldMemOperand(a0, Oddball::kToNumberOffset));  // In delay slot.
+  __ bind(&not_oddball);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    // Push argument.
+    __ Push(a0);
+    // We cannot use a tail call here because this builtin can also be called
+    // from wasm.
+    __ CallRuntime(Runtime::kToNumber);
+  }
+  __ Ret();
+}
+
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // State setup as expected by MacroAssembler::InvokePrologue.
   // ----------- S t a t e -------------
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 4084964..3213677 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -21,70 +21,29 @@
 namespace v8 {
 namespace internal {
 
+#define __ ACCESS_MASM(masm)
 
-static void InitializeArrayConstructorDescriptor(
-    Isolate* isolate, CodeStubDescriptor* descriptor,
-    int constant_stack_parameter_count) {
-  Address deopt_handler = Runtime::FunctionForId(
-      Runtime::kArrayConstructor)->entry;
-
-  if (constant_stack_parameter_count == 0) {
-    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  } else {
-    descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  }
+void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
+  __ sll(t9, a0, kPointerSizeLog2);
+  __ Addu(t9, sp, t9);
+  __ sw(a1, MemOperand(t9, 0));
+  __ Push(a1);
+  __ Push(a2);
+  __ Addu(a0, a0, Operand(3));
+  __ TailCallRuntime(Runtime::kNewArray);
 }
 
-
-static void InitializeInternalArrayConstructorDescriptor(
-    Isolate* isolate, CodeStubDescriptor* descriptor,
-    int constant_stack_parameter_count) {
-  Address deopt_handler = Runtime::FunctionForId(
-      Runtime::kInternalArrayConstructor)->entry;
-
-  if (constant_stack_parameter_count == 0) {
-    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  } else {
-    descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  }
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
   descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
 }
 
-void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+void FastFunctionBindStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
+  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
+  descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
 }
 
-
-void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                           Condition cc);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
@@ -1021,7 +980,7 @@
   CEntryStub::GenerateAheadOfTime(isolate);
   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
-  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
   CreateWeakCellStub::GenerateAheadOfTime(isolate);
   BinaryOpICStub::GenerateAheadOfTime(isolate);
@@ -1440,7 +1399,6 @@
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
                                           &miss,  // When index out of range.
-                                          STRING_INDEX_IS_ARRAY_INDEX,
                                           RECEIVER_IS_STRING);
   char_at_generator.GenerateFast(masm);
   __ Ret();
@@ -1920,6 +1878,7 @@
   // a2 : feedback vector
   // a3 : slot in feedback vector (Smi)
   Label initialize, done, miss, megamorphic, not_array_function;
+  Label done_initialize_count, done_increment_count;
 
   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
@@ -1938,7 +1897,7 @@
   Register feedback_map = t1;
   Register weak_value = t4;
   __ lw(weak_value, FieldMemOperand(t2, WeakCell::kValueOffset));
-  __ Branch(&done, eq, a1, Operand(weak_value));
+  __ Branch(&done_increment_count, eq, a1, Operand(weak_value));
   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   __ Branch(&done, eq, t2, Operand(at));
   __ lw(feedback_map, FieldMemOperand(t2, HeapObject::kMapOffset));
@@ -1960,7 +1919,7 @@
   // Make sure the function is the Array() function
   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
   __ Branch(&megamorphic, ne, a1, Operand(t2));
-  __ jmp(&done);
+  __ jmp(&done_increment_count);
 
   __ bind(&miss);
 
@@ -1987,11 +1946,27 @@
   // slot.
   CreateAllocationSiteStub create_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &create_stub);
-  __ Branch(&done);
+  __ Branch(&done_initialize_count);
 
   __ bind(&not_array_function);
   CreateWeakCellStub weak_cell_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &weak_cell_stub);
+
+  __ bind(&done_initialize_count);
+  // Initialize the call counter.
+  __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ li(t0, Operand(Smi::FromInt(1)));
+  __ Branch(USE_DELAY_SLOT, &done);
+  __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
+
+  __ bind(&done_increment_count);
+
+  // Increment the call count for monomorphic function calls.
+  __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ lw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
+  __ Addu(t0, t0, Operand(Smi::FromInt(1)));
+  __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
+
   __ bind(&done);
 }
 
@@ -2052,7 +2027,7 @@
   // Increment the call count for monomorphic function calls.
   __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
-  __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+  __ Addu(a3, a3, Operand(Smi::FromInt(1)));
   __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
 
   __ mov(a2, t0);
@@ -2098,7 +2073,7 @@
   // Increment the call count for monomorphic function calls.
   __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
-  __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+  __ Addu(a3, a3, Operand(Smi::FromInt(1)));
   __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
 
   __ bind(&call_function);
@@ -2169,7 +2144,7 @@
 
   // Initialize the call counter.
   __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ li(t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+  __ li(t0, Operand(Smi::FromInt(1)));
   __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
 
   // Store the function. Use a stub since we need a frame for allocation.
@@ -2269,13 +2244,7 @@
   } else {
     __ Push(object_, index_);
   }
-  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
-    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
-  } else {
-    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
-    // NumberToSmi discards numbers that are not exact integers.
-    __ CallRuntime(Runtime::kNumberToSmi);
-  }
+  __ CallRuntime(Runtime::kNumberToSmi);
 
   // Save the conversion result before the pop instructions below
   // have a chance to overwrite it.
@@ -2617,74 +2586,13 @@
   // a3: from index (untagged)
   __ SmiTag(a3, a3);
   StringCharAtGenerator generator(v0, a3, a2, v0, &runtime, &runtime, &runtime,
-                                  STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
+                                  RECEIVER_IS_STRING);
   generator.GenerateFast(masm);
   __ DropAndRet(3);
   generator.SkipSlow(masm, &runtime);
 }
 
 
-void ToNumberStub::Generate(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in a0.
-  Label not_smi;
-  __ JumpIfNotSmi(a0, &not_smi);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-  __ bind(&not_smi);
-
-  Label not_heap_number;
-  __ GetObjectType(a0, a1, a1);
-  // a0: receiver
-  // a1: receiver instance type
-  __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-  __ bind(&not_heap_number);
-
-  NonNumberToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-}
-
-void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
-  // The NonNumberToNumber stub takes on argument in a0.
-  __ AssertNotNumber(a0);
-
-  Label not_string;
-  __ GetObjectType(a0, a1, a1);
-  // a0: receiver
-  // a1: receiver instance type
-  __ Branch(&not_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
-  StringToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_string);
-
-  Label not_oddball;
-  __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
-  __ Ret(USE_DELAY_SLOT);
-  __ lw(v0, FieldMemOperand(a0, Oddball::kToNumberOffset));  // In delay slot.
-  __ bind(&not_oddball);
-
-  __ Push(a0);  // Push argument.
-  __ TailCallRuntime(Runtime::kToNumber);
-}
-
-void StringToNumberStub::Generate(MacroAssembler* masm) {
-  // The StringToNumber stub takes on argument in a0.
-  __ AssertString(a0);
-
-  // Check if string has a cached array index.
-  Label runtime;
-  __ lw(a2, FieldMemOperand(a0, String::kHashFieldOffset));
-  __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
-  __ Branch(&runtime, ne, at, Operand(zero_reg));
-  __ IndexFromHash(a2, v0);
-  __ Ret();
-
-  __ bind(&runtime);
-  __ Push(a0);  // Push argument.
-  __ TailCallRuntime(Runtime::kStringToNumber);
-}
-
 void ToStringStub::Generate(MacroAssembler* masm) {
   // The ToString stub takes on argument in a0.
   Label is_number;
@@ -2865,7 +2773,7 @@
   // Load a2 with the allocation site. We stick an undefined dummy value here
   // and replace it with the real allocation site later when we instantiate this
   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
-  __ li(a2, handle(isolate()->heap()->undefined_value()));
+  __ li(a2, isolate()->factory()->undefined_value());
 
   // Make sure that we actually patched the allocation site.
   if (FLAG_debug_code) {
@@ -3760,14 +3668,14 @@
 
 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  LoadICStub stub(isolate(), state());
+  LoadICStub stub(isolate());
   stub.GenerateForTrampoline(masm);
 }
 
 
 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  KeyedLoadICStub stub(isolate(), state());
+  KeyedLoadICStub stub(isolate());
   stub.GenerateForTrampoline(masm);
 }
 
@@ -4384,19 +4292,13 @@
   }
 }
 
-
-void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
       isolate);
   ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
       isolate);
-  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
-      isolate);
-}
-
-
-void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
-    Isolate* isolate) {
+  ArrayNArgumentsConstructorStub stub(isolate);
+  stub.GetCode();
   ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
   for (int i = 0; i < 2; i++) {
     // For internal arrays we only need a few things.
@@ -4404,8 +4306,6 @@
     stubh1.GetCode();
     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
     stubh2.GetCode();
-    InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
-    stubh3.GetCode();
   }
 }
 
@@ -4424,13 +4324,15 @@
     CreateArrayDispatchOneArgument(masm, mode);
 
     __ bind(&not_one_case);
-    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+    ArrayNArgumentsConstructorStub stub(masm->isolate());
+    __ TailCallStub(&stub);
   } else if (argument_count() == NONE) {
     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   } else if (argument_count() == ONE) {
     CreateArrayDispatchOneArgument(masm, mode);
   } else if (argument_count() == MORE_THAN_ONE) {
-    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+    ArrayNArgumentsConstructorStub stub(masm->isolate());
+    __ TailCallStub(&stub);
   } else {
     UNREACHABLE();
   }
@@ -4514,7 +4416,7 @@
   InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
   __ TailCallStub(&stub0, lo, a0, Operand(1));
 
-  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+  ArrayNArgumentsConstructorStub stubN(isolate());
   __ TailCallStub(&stubN, hi, a0, Operand(1));
 
   if (IsFastPackedElementsKind(kind)) {
@@ -4738,10 +4640,10 @@
   // specified by the function's internal formal parameter count.
   Label rest_parameters;
   __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(a1,
-        FieldMemOperand(a1, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ Subu(a0, a0, Operand(a1));
+  __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a3,
+        FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ Subu(a0, a0, Operand(a3));
   __ Branch(&rest_parameters, gt, a0, Operand(zero_reg));
 
   // Return an empty rest parameter array.
@@ -4788,15 +4690,16 @@
     // ----------- S t a t e -------------
     //  -- cp : context
     //  -- a0 : number of rest parameters (tagged)
+    //  -- a1 : function
     //  -- a2 : pointer to first rest parameters
     //  -- ra : return address
     // -----------------------------------
 
     // Allocate space for the rest parameter array plus the backing store.
     Label allocate, done_allocate;
-    __ li(a1, Operand(JSArray::kSize + FixedArray::kHeaderSize));
-    __ Lsa(a1, a1, a0, kPointerSizeLog2 - 1);
-    __ Allocate(a1, v0, a3, t0, &allocate, NO_ALLOCATION_FLAGS);
+    __ li(t0, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+    __ Lsa(t0, t0, a0, kPointerSizeLog2 - 1);
+    __ Allocate(t0, v0, a3, t1, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Setup the elements array in v0.
@@ -4829,16 +4732,24 @@
     __ Ret(USE_DELAY_SLOT);
     __ mov(v0, a3);  // In delay slot
 
-    // Fall back to %AllocateInNewSpace.
+    // Fall back to %AllocateInNewSpace (if not too big).
+    Label too_big_for_new_space;
     __ bind(&allocate);
+    __ Branch(&too_big_for_new_space, gt, t0,
+              Operand(Page::kMaxRegularHeapObjectSize));
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
-      __ SmiTag(a1);
-      __ Push(a0, a2, a1);
+      __ SmiTag(t0);
+      __ Push(a0, a2, t0);
       __ CallRuntime(Runtime::kAllocateInNewSpace);
       __ Pop(a0, a2);
     }
     __ jmp(&done_allocate);
+
+    // Fall back to %NewStrictArguments.
+    __ bind(&too_big_for_new_space);
+    __ Push(a1);
+    __ TailCallRuntime(Runtime::kNewStrictArguments);
   }
 }
 
@@ -5104,9 +5015,9 @@
   __ Branch(&arguments_adaptor, eq, a0,
             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   {
-    __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+    __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
     __ lw(a0,
-          FieldMemOperand(a1, SharedFunctionInfo::kFormalParameterCountOffset));
+          FieldMemOperand(t0, SharedFunctionInfo::kFormalParameterCountOffset));
     __ Lsa(a2, a2, a0, kPointerSizeLog2 - 1);
     __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
                             1 * kPointerSize));
@@ -5124,15 +5035,16 @@
   // ----------- S t a t e -------------
   //  -- cp : context
   //  -- a0 : number of rest parameters (tagged)
+  //  -- a1 : function
   //  -- a2 : pointer to first rest parameters
   //  -- ra : return address
   // -----------------------------------
 
   // Allocate space for the strict arguments object plus the backing store.
   Label allocate, done_allocate;
-  __ li(a1, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
-  __ Lsa(a1, a1, a0, kPointerSizeLog2 - 1);
-  __ Allocate(a1, v0, a3, t0, &allocate, NO_ALLOCATION_FLAGS);
+  __ li(t0, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+  __ Lsa(t0, t0, a0, kPointerSizeLog2 - 1);
+  __ Allocate(t0, v0, a3, t1, &allocate, NO_ALLOCATION_FLAGS);
   __ bind(&done_allocate);
 
   // Setup the elements array in v0.
@@ -5165,46 +5077,24 @@
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a3);  // In delay slot
 
-  // Fall back to %AllocateInNewSpace.
+  // Fall back to %AllocateInNewSpace (if not too big).
+  Label too_big_for_new_space;
   __ bind(&allocate);
+  __ Branch(&too_big_for_new_space, gt, t0,
+            Operand(Page::kMaxRegularHeapObjectSize));
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ SmiTag(a1);
-    __ Push(a0, a2, a1);
+    __ SmiTag(t0);
+    __ Push(a0, a2, t0);
     __ CallRuntime(Runtime::kAllocateInNewSpace);
     __ Pop(a0, a2);
   }
   __ jmp(&done_allocate);
-}
 
-
-void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
-  Register context_reg = cp;
-  Register slot_reg = a2;
-  Register result_reg = v0;
-  Label slow_case;
-
-  // Go up context chain to the script context.
-  for (int i = 0; i < depth(); ++i) {
-    __ lw(result_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
-    context_reg = result_reg;
-  }
-
-  // Load the PropertyCell value at the specified slot.
-  __ Lsa(at, context_reg, slot_reg, kPointerSizeLog2);
-  __ lw(result_reg, ContextMemOperand(at, 0));
-  __ lw(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));
-
-  // Check that value is not the_hole.
-  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-  __ Branch(&slow_case, eq, result_reg, Operand(at));
-  __ Ret();
-
-  // Fallback to the runtime.
-  __ bind(&slow_case);
-  __ SmiTag(slot_reg);
-  __ Push(slot_reg);
-  __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+  // Fall back to %NewStrictArguments.
+  __ bind(&too_big_for_new_space);
+  __ Push(a1);
+  __ TailCallRuntime(Runtime::kNewStrictArguments);
 }
 
 
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index 63bbda3..07cab80 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -16,60 +16,6 @@
 
 #define __ masm.
 
-
-#if defined(USE_SIMULATOR)
-byte* fast_exp_mips_machine_code = nullptr;
-double fast_exp_simulator(double x, Isolate* isolate) {
-  return Simulator::current(isolate)->CallFP(fast_exp_mips_machine_code, x, 0);
-}
-#endif
-
-
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
-  size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
-  if (buffer == nullptr) return nullptr;
-  ExternalReference::InitializeMathExpData();
-
-  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
-                      CodeObjectRequired::kNo);
-
-  {
-    DoubleRegister input = f12;
-    DoubleRegister result = f0;
-    DoubleRegister double_scratch1 = f4;
-    DoubleRegister double_scratch2 = f6;
-    Register temp1 = t0;
-    Register temp2 = t1;
-    Register temp3 = t2;
-
-    __ MovFromFloatParameter(input);
-    __ Push(temp3, temp2, temp1);
-    MathExpGenerator::EmitMathExp(
-        &masm, input, result, double_scratch1, double_scratch2,
-        temp1, temp2, temp3);
-    __ Pop(temp3, temp2, temp1);
-    __ MovToFloatResult(result);
-    __ Ret();
-  }
-
-  CodeDesc desc;
-  masm.GetCode(&desc);
-  DCHECK(!RelocInfo::RequiresRelocation(desc));
-
-  Assembler::FlushICache(isolate, buffer, actual_size);
-  base::OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
-  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
-#else
-  fast_exp_mips_machine_code = buffer;
-  return &fast_exp_simulator;
-#endif
-}
-
-
 #if defined(V8_HOST_ARCH_MIPS)
 MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
                                                 MemCopyUint8Function stub) {
@@ -1092,95 +1038,6 @@
   __ bind(&done);
 }
 
-
-static MemOperand ExpConstant(int index, Register base) {
-  return MemOperand(base, index * kDoubleSize);
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
-                                   DoubleRegister input,
-                                   DoubleRegister result,
-                                   DoubleRegister double_scratch1,
-                                   DoubleRegister double_scratch2,
-                                   Register temp1,
-                                   Register temp2,
-                                   Register temp3) {
-  DCHECK(!input.is(result));
-  DCHECK(!input.is(double_scratch1));
-  DCHECK(!input.is(double_scratch2));
-  DCHECK(!result.is(double_scratch1));
-  DCHECK(!result.is(double_scratch2));
-  DCHECK(!double_scratch1.is(double_scratch2));
-  DCHECK(!temp1.is(temp2));
-  DCHECK(!temp1.is(temp3));
-  DCHECK(!temp2.is(temp3));
-  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
-  DCHECK(!masm->serializer_enabled());  // External references not serializable.
-
-  Label zero, infinity, done;
-
-  __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
-
-  __ ldc1(double_scratch1, ExpConstant(0, temp3));
-  __ BranchF(&zero, NULL, ge, double_scratch1, input);
-
-  __ ldc1(double_scratch2, ExpConstant(1, temp3));
-  __ BranchF(&infinity, NULL, ge, input, double_scratch2);
-
-  __ ldc1(double_scratch1, ExpConstant(3, temp3));
-  __ ldc1(result, ExpConstant(4, temp3));
-  __ mul_d(double_scratch1, double_scratch1, input);
-  __ add_d(double_scratch1, double_scratch1, result);
-  __ FmoveLow(temp2, double_scratch1);
-  __ sub_d(double_scratch1, double_scratch1, result);
-  __ ldc1(result, ExpConstant(6, temp3));
-  __ ldc1(double_scratch2, ExpConstant(5, temp3));
-  __ mul_d(double_scratch1, double_scratch1, double_scratch2);
-  __ sub_d(double_scratch1, double_scratch1, input);
-  __ sub_d(result, result, double_scratch1);
-  __ mul_d(double_scratch2, double_scratch1, double_scratch1);
-  __ mul_d(result, result, double_scratch2);
-  __ ldc1(double_scratch2, ExpConstant(7, temp3));
-  __ mul_d(result, result, double_scratch2);
-  __ sub_d(result, result, double_scratch1);
-  // Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1.
-  DCHECK(*reinterpret_cast<double*>
-         (ExternalReference::math_exp_constants(8).address()) == 1);
-  __ Move(double_scratch2, 1.);
-  __ add_d(result, result, double_scratch2);
-  __ srl(temp1, temp2, 11);
-  __ Ext(temp2, temp2, 0, 11);
-  __ Addu(temp1, temp1, Operand(0x3ff));
-
-  // Must not call ExpConstant() after overwriting temp3!
-  __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
-  __ Lsa(temp3, temp3, temp2, 3);
-  __ lw(temp2, MemOperand(temp3, Register::kMantissaOffset));
-  __ lw(temp3, MemOperand(temp3, Register::kExponentOffset));
-  // The first word is loaded is the lower number register.
-  if (temp2.code() < temp3.code()) {
-    __ sll(at, temp1, 20);
-    __ Or(temp1, temp3, at);
-    __ Move(double_scratch1, temp2, temp1);
-  } else {
-    __ sll(at, temp1, 20);
-    __ Or(temp1, temp2, at);
-    __ Move(double_scratch1, temp3, temp1);
-  }
-  __ mul_d(result, result, double_scratch1);
-  __ BranchShort(&done);
-
-  __ bind(&zero);
-  __ Move(result, kDoubleRegZero);
-  __ BranchShort(&done);
-
-  __ bind(&infinity);
-  __ ldc1(result, ExpConstant(2, temp3));
-
-  __ bind(&done);
-}
-
 #ifdef DEBUG
 // nop(CODE_AGE_MARKER_NOP)
 static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index ad7abb3..a4f8184 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -29,23 +29,6 @@
   DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
 };
 
-
-class MathExpGenerator : public AllStatic {
- public:
-  // Register input isn't modified. All other registers are clobbered.
-  static void EmitMathExp(MacroAssembler* masm,
-                          DoubleRegister input,
-                          DoubleRegister result,
-                          DoubleRegister double_scratch1,
-                          DoubleRegister double_scratch2,
-                          Register temp1,
-                          Register temp2,
-                          Register temp3);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/mips/constants-mips.h b/src/mips/constants-mips.h
index f50a849..8301c5e 100644
--- a/src/mips/constants-mips.h
+++ b/src/mips/constants-mips.h
@@ -1186,11 +1186,10 @@
           int sa = SaFieldRaw() >> kSaShift;
           switch (sa) {
             case BITSWAP:
-              return kRegisterType;
             case WSBH:
             case SEB:
             case SEH:
-              return kUnsupported;
+              return kRegisterType;
           }
           sa >>= kBp2Bits;
           switch (sa) {
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index e9caaad..478b9df 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -117,8 +117,7 @@
 
   // Save all FPU registers before messing with them.
   __ Subu(sp, sp, Operand(kDoubleRegsSize));
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
     int code = config->GetAllocatableDoubleCode(i);
     const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
diff --git a/src/mips/disasm-mips.cc b/src/mips/disasm-mips.cc
index e1890ee..bd07874 100644
--- a/src/mips/disasm-mips.cc
+++ b/src/mips/disasm-mips.cc
@@ -1264,11 +1264,30 @@
           }
           break;
         }
-        case SEB:
-        case SEH:
-        case WSBH:
-          UNREACHABLE();
+        case SEB: {
+          if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+            Format(instr, "seb     'rd, 'rt");
+          } else {
+            Unknown(instr);
+          }
           break;
+        }
+        case SEH: {
+          if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+            Format(instr, "seh     'rd, 'rt");
+          } else {
+            Unknown(instr);
+          }
+          break;
+        }
+        case WSBH: {
+          if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+            Format(instr, "wsbh    'rd, 'rt");
+          } else {
+            Unknown(instr);
+          }
+          break;
+        }
         default: {
           sa >>= kBp2Bits;
           switch (sa) {
@@ -1699,7 +1718,7 @@
 namespace disasm {
 
 const char* NameConverter::NameOfAddress(byte* addr) const {
-  v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+  v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
   return tmp_buffer_.start();
 }
 
@@ -1762,8 +1781,8 @@
     buffer[0] = '\0';
     byte* prev_pc = pc;
     pc += d.InstructionDecode(buffer, pc);
-    v8::internal::PrintF(f, "%p    %08x      %s\n",
-        prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+    v8::internal::PrintF(f, "%p    %08x      %s\n", static_cast<void*>(prev_pc),
+                         *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
   }
 }
 
diff --git a/src/mips/interface-descriptors-mips.cc b/src/mips/interface-descriptors-mips.cc
index 30a7a74..a8e6e57 100644
--- a/src/mips/interface-descriptors-mips.cc
+++ b/src/mips/interface-descriptors-mips.cc
@@ -11,6 +11,14 @@
 
 const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
 
+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
+    CallInterfaceDescriptorData* data, int register_parameter_count) {
+  const Register default_stub_registers[] = {a0, a1, a2, a3, t0};
+  CHECK_LE(static_cast<size_t>(register_parameter_count),
+           arraysize(default_stub_registers));
+  data->InitializePlatformSpecific(register_parameter_count,
+                                   default_stub_registers);
+}
 
 const Register LoadDescriptor::ReceiverRegister() { return a1; }
 const Register LoadDescriptor::NameRegister() { return a2; }
@@ -39,9 +47,6 @@
 const Register StoreTransitionDescriptor::MapRegister() { return a3; }
 
 
-const Register LoadGlobalViaContextDescriptor::SlotRegister() { return a2; }
-
-
 const Register StoreGlobalViaContextDescriptor::SlotRegister() { return a2; }
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return a0; }
 
@@ -63,8 +68,6 @@
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
 
-const Register HasPropertyDescriptor::ObjectRegister() { return a0; }
-const Register HasPropertyDescriptor::KeyRegister() { return a3; }
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -254,43 +257,24 @@
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
   // a0 -- number of arguments
   // a1 -- function
   // a2 -- allocation site with elements kind
-  Register registers[] = {a1, a2};
+  Register registers[] = {a1, a2, a0};
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // stack param count needs (constructor pointer, and single argument)
   Register registers[] = {a1, a2, a0};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void InternalArrayConstructorConstantArgCountDescriptor::
-    InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
-  // register state
-  // a0 -- number of arguments
-  // a1 -- constructor function
-  Register registers[] = {a1};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // stack param count needs (constructor pointer, and single argument)
-  Register registers[] = {a1, a0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastArrayPushDescriptor::InitializePlatformSpecific(
+void VarArgFunctionDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // stack param count needs (arg count)
   Register registers[] = {a0};
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 3dbfd6b..86aef38 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -18,6 +18,19 @@
 namespace v8 {
 namespace internal {
 
+// Floating point constants.
+const uint32_t kDoubleSignMask = HeapNumber::kSignMask;
+const uint32_t kDoubleExponentShift = HeapNumber::kExponentShift;
+const uint32_t kDoubleNaNShift = kDoubleExponentShift - 1;
+const uint32_t kDoubleNaNMask =
+    HeapNumber::kExponentMask | (1 << kDoubleNaNShift);
+
+const uint32_t kSingleSignMask = kBinary32SignMask;
+const uint32_t kSingleExponentMask = kBinary32ExponentMask;
+const uint32_t kSingleExponentShift = kBinary32ExponentShift;
+const uint32_t kSingleNaNShift = kSingleExponentShift - 1;
+const uint32_t kSingleNaNMask = kSingleExponentMask | (1 << kSingleNaNShift);
+
 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
                                CodeObjectRequired create_code_object)
     : Assembler(arg_isolate, buffer, size),
@@ -30,7 +43,6 @@
   }
 }
 
-
 void MacroAssembler::Load(Register dst,
                           const MemOperand& src,
                           Representation r) {
@@ -67,7 +79,6 @@
   }
 }
 
-
 void MacroAssembler::LoadRoot(Register destination,
                               Heap::RootListIndex index) {
   lw(destination, MemOperand(s6, index << kPointerSizeLog2));
@@ -1191,6 +1202,79 @@
 
 // ------------Pseudo-instructions-------------
 
+// Word Swap Byte
+void MacroAssembler::ByteSwapSigned(Register reg, int operand_size) {
+  DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
+  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+    if (operand_size == 2) {
+      seh(reg, reg);
+    } else if (operand_size == 1) {
+      seb(reg, reg);
+    }
+    // No need to do any preparation if operand_size is 4
+
+    wsbh(reg, reg);
+    rotr(reg, reg, 16);
+  } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
+    if (operand_size == 1) {
+      sll(reg, reg, 24);
+      sra(reg, reg, 24);
+    } else if (operand_size == 2) {
+      sll(reg, reg, 16);
+      sra(reg, reg, 16);
+    }
+    // No need to do any preparation if operand_size is 4
+
+    Register tmp = t0;
+    Register tmp2 = t1;
+
+    andi(tmp2, reg, 0xFF);
+    sll(tmp2, tmp2, 24);
+    or_(tmp, zero_reg, tmp2);
+
+    andi(tmp2, reg, 0xFF00);
+    sll(tmp2, tmp2, 8);
+    or_(tmp, tmp, tmp2);
+
+    srl(reg, reg, 8);
+    andi(tmp2, reg, 0xFF00);
+    or_(tmp, tmp, tmp2);
+
+    srl(reg, reg, 16);
+    andi(tmp2, reg, 0xFF);
+    or_(tmp, tmp, tmp2);
+
+    or_(reg, tmp, zero_reg);
+  }
+}
+
+void MacroAssembler::ByteSwapUnsigned(Register reg, int operand_size) {
+  DCHECK(operand_size == 1 || operand_size == 2);
+
+  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+    if (operand_size == 1) {
+      andi(reg, reg, 0xFF);
+    } else {
+      andi(reg, reg, 0xFFFF);
+    }
+    // No need to do any preparation if operand_size is 4
+
+    wsbh(reg, reg);
+    rotr(reg, reg, 16);
+  } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
+    if (operand_size == 1) {
+      sll(reg, reg, 24);
+    } else {
+      Register tmp = t0;
+
+      andi(tmp, reg, 0xFF00);
+      sll(reg, reg, 24);
+      sll(tmp, tmp, 8);
+      or_(reg, tmp, reg);
+    }
+  }
+}
+
 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
   DCHECK(!rd.is(at));
   DCHECK(!rs.rm().is(at));
@@ -3832,9 +3916,6 @@
   Label start;
   bind(&start);
   int32_t target_int = reinterpret_cast<int32_t>(target);
-  // Must record previous source positions before the
-  // li() generates a new code target.
-  positions_recorder()->WriteRecordedPositions();
   li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
   Call(t9, cond, rs, rt, bd);
   DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
@@ -4655,9 +4736,7 @@
                                                  int elements_offset) {
   DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2,
                      scratch3));
-  Label smi_value, maybe_nan, have_double_value, is_nan, done;
-  Register mantissa_reg = scratch2;
-  Register exponent_reg = scratch3;
+  Label smi_value, done;
 
   // Handle smi values specially.
   JumpIfSmi(value_reg, &smi_value);
@@ -4669,52 +4748,97 @@
            fail,
            DONT_DO_SMI_CHECK);
 
-  // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
-  // in the exponent.
-  li(scratch1, Operand(kHoleNanUpper32 & HeapNumber::kExponentMask));
-  lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
-  Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
+  // Double value, turn potential sNaN into qNan.
+  DoubleRegister double_result = f0;
+  DoubleRegister double_scratch = f2;
 
-  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
-  bind(&have_double_value);
-  Lsa(scratch1, elements_reg, key_reg, kDoubleSizeLog2 - kSmiTagSize);
-  sw(mantissa_reg,
-      FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
-          + kHoleNanLower32Offset));
-  sw(exponent_reg,
-      FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
-          + kHoleNanUpper32Offset));
-  jmp(&done);
-
-  bind(&maybe_nan);
-  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
-  // it's an Infinity, and the non-NaN code path applies.
-  Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
-  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-  Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
-  bind(&is_nan);
-  // Load canonical NaN for storing into the double array.
-  LoadRoot(at, Heap::kNanValueRootIndex);
-  lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
-  lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
-  jmp(&have_double_value);
+  ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+  Branch(USE_DELAY_SLOT, &done);  // Canonicalization is one instruction.
+  FPUCanonicalizeNaN(double_result, double_result);
 
   bind(&smi_value);
+  Register untagged_value = scratch2;
+  SmiUntag(untagged_value, value_reg);
+  mtc1(untagged_value, double_scratch);
+  cvt_d_w(double_result, double_scratch);
+
+  bind(&done);
   Addu(scratch1, elements_reg,
       Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
               elements_offset));
   Lsa(scratch1, scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
   // scratch1 is now effective address of the double element
+  sdc1(double_result, MemOperand(scratch1, 0));
+}
 
-  Register untagged_value = scratch2;
-  SmiUntag(untagged_value, value_reg);
-  mtc1(untagged_value, f2);
-  cvt_d_w(f0, f2);
-  sdc1(f0, MemOperand(scratch1, 0));
+void MacroAssembler::SubNanPreservePayloadAndSign_s(FloatRegister fd,
+                                                    FloatRegister fs,
+                                                    FloatRegister ft) {
+  FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
+  Label check_nan, save_payload, done;
+  Register scratch1 = t8;
+  Register scratch2 = t9;
+
+  sub_s(dest, fs, ft);
+  // Check if the result of subtraction is NaN.
+  BranchF32(nullptr, &check_nan, eq, fs, ft);
+  Branch(USE_DELAY_SLOT, &done);
+  dest.is(fd) ? nop() : mov_s(fd, dest);
+
+  bind(&check_nan);
+  // Check if first operand is a NaN.
+  mfc1(scratch1, fs);
+  BranchF32(nullptr, &save_payload, eq, fs, fs);
+  // Second operand must be a NaN.
+  mfc1(scratch1, ft);
+
+  bind(&save_payload);
+  // Reserve payload.
+  And(scratch1, scratch1,
+      Operand(kSingleSignMask | ((1 << kSingleNaNShift) - 1)));
+  mfc1(scratch2, dest);
+  And(scratch2, scratch2, Operand(kSingleNaNMask));
+  Or(scratch2, scratch2, scratch1);
+  mtc1(scratch2, fd);
+
   bind(&done);
 }
 
+void MacroAssembler::SubNanPreservePayloadAndSign_d(DoubleRegister fd,
+                                                    DoubleRegister fs,
+                                                    DoubleRegister ft) {
+  FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
+  Label check_nan, save_payload, done;
+  Register scratch1 = t8;
+  Register scratch2 = t9;
+
+  sub_d(dest, fs, ft);
+  // Check if the result of subtraction is NaN.
+  BranchF64(nullptr, &check_nan, eq, fs, ft);
+  Branch(USE_DELAY_SLOT, &done);
+  dest.is(fd) ? nop() : mov_d(fd, dest);
+
+  bind(&check_nan);
+  // Check if first operand is a NaN.
+  Mfhc1(scratch1, fs);
+  mov_s(dest, fs);
+  BranchF64(nullptr, &save_payload, eq, fs, fs);
+  // Second operand must be a NaN.
+  Mfhc1(scratch1, ft);
+  mov_s(dest, ft);
+
+  bind(&save_payload);
+  // Reserve payload.
+  And(scratch1, scratch1,
+      Operand(kDoubleSignMask | ((1 << kDoubleNaNShift) - 1)));
+  Mfhc1(scratch2, dest);
+  And(scratch2, scratch2, Operand(kDoubleNaNMask));
+  Or(scratch2, scratch2, scratch1);
+  Move_s(fd, dest);
+  Mthc1(scratch2, fd);
+
+  bind(&done);
+}
 
 void MacroAssembler::CompareMapAndBranch(Register obj,
                                          Register scratch,
@@ -4778,6 +4902,10 @@
   Branch(fail, ne, scratch, Operand(at));
 }
 
+void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
+                                        const DoubleRegister src) {
+  sub_d(dst, src, kDoubleRegZero);
+}
 
 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
   li(value, Operand(cell));
@@ -4998,11 +5126,12 @@
                                              const ParameterCount& expected,
                                              const ParameterCount& actual) {
   Label skip_flooding;
-  ExternalReference step_in_enabled =
-      ExternalReference::debug_step_in_enabled_address(isolate());
-  li(t0, Operand(step_in_enabled));
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  li(t0, Operand(last_step_action));
   lb(t0, MemOperand(t0));
-  Branch(&skip_flooding, eq, t0, Operand(zero_reg));
+  Branch(&skip_flooding, lt, t0, Operand(StepIn));
   {
     FrameScope frame(this,
                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -5746,9 +5875,8 @@
 
 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
   lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  lw(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
-  lw(vector,
-     FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+  lw(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
+  lw(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
 }
 
 
@@ -6662,8 +6790,7 @@
   if (reg5.is_valid()) regs |= reg5.bit();
   if (reg6.is_valid()) regs |= reg6.bit();
 
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
     int code = config->GetAllocatableGeneralCode(i);
     Register candidate = Register::from_code(code);
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 2417025..8c6e5bd 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -687,6 +687,10 @@
   // ---------------------------------------------------------------------------
   // Pseudo-instructions.
 
+  // Change endianness
+  void ByteSwapSigned(Register reg, int operand_size);
+  void ByteSwapUnsigned(Register reg, int operand_size);
+
   void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
 
   void Ulh(Register rd, const MemOperand& rs);
@@ -871,6 +875,12 @@
   void Floor_w_d(FPURegister fd, FPURegister fs);
   void Ceil_w_d(FPURegister fd, FPURegister fs);
 
+  // Preserve value of a NaN operand
+  void SubNanPreservePayloadAndSign_s(FPURegister fd, FPURegister fs,
+                                      FPURegister ft);
+  void SubNanPreservePayloadAndSign_d(FPURegister fd, FPURegister fs,
+                                      FPURegister ft);
+
   // FP32 mode: Move the general purpose register into
   // the high part of the double-register pair.
   // FP64 mode: Move the general-purpose register into
@@ -1231,6 +1241,9 @@
                        Handle<WeakCell> cell, Handle<Code> success,
                        SmiCheckType smi_check_type);
 
+  // If the value is a NaN, canonicalize the value else, do nothing.
+  void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
+
   // Get value of the weak cell.
   void GetWeakValue(Register value, Handle<WeakCell> cell);
 
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index f8dc515..71dcda2 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -864,9 +864,7 @@
   last_debugger_input_ = input;
 }
 
-
-void Simulator::FlushICache(v8::internal::HashMap* i_cache,
-                            void* start_addr,
+void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
                             size_t size) {
   intptr_t start = reinterpret_cast<intptr_t>(start_addr);
   int intra_line = (start & CachePage::kLineMask);
@@ -887,10 +885,8 @@
   }
 }
 
-
-CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
-  v8::internal::HashMap::Entry* entry =
-      i_cache->LookupOrInsert(page, ICacheHash(page));
+CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
+  base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
   if (entry->value == NULL) {
     CachePage* new_page = new CachePage();
     entry->value = new_page;
@@ -900,9 +896,7 @@
 
 
 // Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
-                             intptr_t start,
-                             int size) {
+void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
   DCHECK(size <= CachePage::kPageSize);
   DCHECK(AllOnOnePage(start, size - 1));
   DCHECK((start & CachePage::kLineMask) == 0);
@@ -914,9 +908,7 @@
   memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
 }
 
-
-void Simulator::CheckICache(v8::internal::HashMap* i_cache,
-                            Instruction* instr) {
+void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
   intptr_t address = reinterpret_cast<intptr_t>(instr);
   void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
   void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -949,7 +941,7 @@
 Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
   i_cache_ = isolate_->simulator_i_cache();
   if (i_cache_ == NULL) {
-    i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+    i_cache_ = new base::HashMap(&ICacheMatch);
     isolate_->set_simulator_i_cache(i_cache_);
   }
   Initialize(isolate);
@@ -1062,10 +1054,10 @@
 
 
 // static
-void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
   Redirection::DeleteChain(first);
   if (i_cache != nullptr) {
-    for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
+    for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
          entry = i_cache->Next(entry)) {
       delete static_cast<CachePage*>(entry->value);
     }
@@ -2088,15 +2080,17 @@
           case ExternalReference::BUILTIN_FP_FP_CALL:
           case ExternalReference::BUILTIN_COMPARE_CALL:
             PrintF("Call to host function at %p with args %f, %f",
-                   FUNCTION_ADDR(generic_target), dval0, dval1);
+                   static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0,
+                   dval1);
             break;
           case ExternalReference::BUILTIN_FP_CALL:
             PrintF("Call to host function at %p with arg %f",
-                FUNCTION_ADDR(generic_target), dval0);
+                   static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0);
             break;
           case ExternalReference::BUILTIN_FP_INT_CALL:
             PrintF("Call to host function at %p with args %f, %d",
-                   FUNCTION_ADDR(generic_target), dval0, ival);
+                   static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0,
+                   ival);
             break;
           default:
             UNREACHABLE();
@@ -2195,13 +2189,15 @@
         PrintF(
             "Call to host triple returning runtime function %p "
             "args %08x, %08x, %08x, %08x, %08x\n",
-            FUNCTION_ADDR(target), arg1, arg2, arg3, arg4, arg5);
+            static_cast<void*>(FUNCTION_ADDR(target)), arg1, arg2, arg3, arg4,
+            arg5);
       }
       // arg0 is a hidden argument pointing to the return location, so don't
       // pass it to the target function.
       ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
       if (::v8::internal::FLAG_trace_sim) {
-        PrintF("Returned { %p, %p, %p }\n", result.x, result.y, result.z);
+        PrintF("Returned { %p, %p, %p }\n", static_cast<void*>(result.x),
+               static_cast<void*>(result.y), static_cast<void*>(result.z));
       }
       // Return is passed back in address pointed to by hidden first argument.
       ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
@@ -2216,13 +2212,8 @@
         PrintF(
             "Call to host function at %p "
             "args %08x, %08x, %08x, %08x, %08x, %08x\n",
-            FUNCTION_ADDR(target),
-            arg0,
-            arg1,
-            arg2,
-            arg3,
-            arg4,
-            arg5);
+            static_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2, arg3,
+            arg4, arg5);
       }
       int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
       set_register(v0, static_cast<int32_t>(result));
@@ -3842,12 +3833,51 @@
           alu_out = static_cast<int32_t>(output);
           break;
         }
-        case SEB:
-        case SEH:
-        case WSBH:
-          alu_out = 0x12345678;
-          UNREACHABLE();
+        case SEB: {
+          uint8_t input = static_cast<uint8_t>(rt());
+          uint32_t output = input;
+          uint32_t mask = 0x00000080;
+
+          // Extending sign
+          if (mask & input) {
+            output |= 0xFFFFFF00;
+          }
+
+          alu_out = static_cast<int32_t>(output);
           break;
+        }
+        case SEH: {
+          uint16_t input = static_cast<uint16_t>(rt());
+          uint32_t output = input;
+          uint32_t mask = 0x00008000;
+
+          // Extending sign
+          if (mask & input) {
+            output |= 0xFFFF0000;
+          }
+
+          alu_out = static_cast<int32_t>(output);
+          break;
+        }
+        case WSBH: {
+          uint32_t input = static_cast<uint32_t>(rt());
+          uint32_t output = 0;
+
+          uint32_t mask = 0xFF000000;
+          for (int i = 0; i < 4; i++) {
+            uint32_t tmp = mask & input;
+            if (i % 2 == 0) {
+              tmp = tmp >> 8;
+            } else {
+              tmp = tmp << 8;
+            }
+            output = output | tmp;
+            mask = mask >> 8;
+          }
+
+          alu_out = static_cast<int32_t>(output);
+          break;
+        }
         default: {
           const uint8_t bp = get_instr()->Bp2Value();
           sa >>= kBp2Bits;
diff --git a/src/mips/simulator-mips.h b/src/mips/simulator-mips.h
index e1c42fd..5c77756 100644
--- a/src/mips/simulator-mips.h
+++ b/src/mips/simulator-mips.h
@@ -75,7 +75,7 @@
 // Running with a simulator.
 
 #include "src/assembler.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
 
 namespace v8 {
 namespace internal {
@@ -216,7 +216,7 @@
   // Call on program start.
   static void Initialize(Isolate* isolate);
 
-  static void TearDown(HashMap* i_cache, Redirection* first);
+  static void TearDown(base::HashMap* i_cache, Redirection* first);
 
   // V8 generally calls into generated JS code with 5 parameters and into
   // generated RegExp code with 7 parameters. This is a convenience function,
@@ -236,8 +236,7 @@
   char* last_debugger_input() { return last_debugger_input_; }
 
   // ICache checking.
-  static void FlushICache(v8::internal::HashMap* i_cache, void* start,
-                          size_t size);
+  static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
 
   // Returns true if pc register contains one of the 'special_values' defined
   // below (bad_ra, end_sim_pc).
@@ -401,10 +400,9 @@
   }
 
   // ICache.
-  static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
-  static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
-                           int size);
-  static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+  static void CheckICache(base::HashMap* i_cache, Instruction* instr);
+  static void FlushOnePage(base::HashMap* i_cache, intptr_t start, int size);
+  static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
 
   enum Exception {
     none,
@@ -450,7 +448,7 @@
   char* last_debugger_input_;
 
   // Icache simulation.
-  v8::internal::HashMap* i_cache_;
+  base::HashMap* i_cache_;
 
   v8::internal::Isolate* isolate_;
 
diff --git a/src/mips64/assembler-mips64-inl.h b/src/mips64/assembler-mips64-inl.h
index 7903094..82267ed 100644
--- a/src/mips64/assembler-mips64-inl.h
+++ b/src/mips64/assembler-mips64-inl.h
@@ -445,6 +445,8 @@
   CheckTrampolinePoolQuick();
 }
 
+template <>
+inline void Assembler::EmitHelper(uint8_t x);
 
 template <typename T>
 void Assembler::EmitHelper(T x) {
@@ -453,6 +455,14 @@
   CheckTrampolinePoolQuick();
 }
 
+template <>
+void Assembler::EmitHelper(uint8_t x) {
+  *reinterpret_cast<uint8_t*>(pc_) = x;
+  pc_ += sizeof(x);
+  if (reinterpret_cast<intptr_t>(pc_) % kInstrSize == 0) {
+    CheckTrampolinePoolQuick();
+  }
+}
 
 void Assembler::emit(Instr x, CompactBranchType is_compact_branch) {
   if (!is_buffer_growth_blocked()) {
diff --git a/src/mips64/assembler-mips64.cc b/src/mips64/assembler-mips64.cc
index 2b8bc72..1dece1c 100644
--- a/src/mips64/assembler-mips64.cc
+++ b/src/mips64/assembler-mips64.cc
@@ -172,37 +172,26 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
+Address RelocInfo::wasm_global_reference() {
+  DCHECK(IsWasmGlobalReference(rmode_));
+  return Assembler::target_address_at(pc_, host_);
+}
+
 uint32_t RelocInfo::wasm_memory_size_reference() {
   DCHECK(IsWasmMemorySizeReference(rmode_));
   return static_cast<uint32_t>(
       reinterpret_cast<intptr_t>((Assembler::target_address_at(pc_, host_))));
 }
 
-void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
-    ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
-  if (IsWasmMemoryReference(rmode_)) {
-    Address updated_memory_reference;
-    DCHECK(old_base <= wasm_memory_reference() &&
-           wasm_memory_reference() < old_base + old_size);
-    updated_memory_reference = new_base + (wasm_memory_reference() - old_base);
-    DCHECK(new_base <= updated_memory_reference &&
-           updated_memory_reference < new_base + new_size);
-    Assembler::set_target_address_at(
-        isolate_, pc_, host_, updated_memory_reference, icache_flush_mode);
-  } else if (IsWasmMemorySizeReference(rmode_)) {
-    uint32_t updated_size_reference;
-    DCHECK(wasm_memory_size_reference() <= old_size);
-    updated_size_reference =
-        new_size + (wasm_memory_size_reference() - old_size);
-    DCHECK(updated_size_reference <= new_size);
-    Assembler::set_target_address_at(
-        isolate_, pc_, host_, reinterpret_cast<Address>(updated_size_reference),
-        icache_flush_mode);
-  } else {
-    UNREACHABLE();
-  }
+void RelocInfo::unchecked_update_wasm_memory_reference(
+    Address address, ICacheFlushMode flush_mode) {
+  Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
+}
+
+void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
+                                                  ICacheFlushMode flush_mode) {
+  Assembler::set_target_address_at(isolate_, pc_, host_,
+                                   reinterpret_cast<Address>(size), flush_mode);
 }
 
 // -----------------------------------------------------------------------------
@@ -311,6 +300,8 @@
       static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
   desc->origin = this;
   desc->constant_pool_size = 0;
+  desc->unwinding_info_size = 0;
+  desc->unwinding_info = nullptr;
 }
 
 
@@ -1263,7 +1254,6 @@
 
 
 void Assembler::bal(int16_t offset) {
-  positions_recorder()->WriteRecordedPositions();
   bgezal(zero_reg, offset);
 }
 
@@ -1276,7 +1266,6 @@
 
 void Assembler::balc(int32_t offset) {
   DCHECK(kArchVariant == kMips64r6);
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
 }
 
@@ -1323,7 +1312,6 @@
 void Assembler::bgezal(Register rs, int16_t offset) {
   DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
   BlockTrampolinePoolScope block_trampoline_pool(this);
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
   BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
@@ -1394,7 +1382,6 @@
 void Assembler::bltzal(Register rs, int16_t offset) {
   DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
   BlockTrampolinePoolScope block_trampoline_pool(this);
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
   BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
@@ -1430,7 +1417,6 @@
 void Assembler::blezalc(Register rt, int16_t offset) {
   DCHECK(kArchVariant == kMips64r6);
   DCHECK(!(rt.is(zero_reg)));
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(BLEZ, zero_reg, rt, offset,
                     CompactBranchType::COMPACT_BRANCH);
 }
@@ -1439,7 +1425,6 @@
 void Assembler::bgezalc(Register rt, int16_t offset) {
   DCHECK(kArchVariant == kMips64r6);
   DCHECK(!(rt.is(zero_reg)));
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
 }
 
@@ -1448,7 +1433,6 @@
   DCHECK(kArchVariant != kMips64r6);
   DCHECK(!(rs.is(zero_reg)));
   BlockTrampolinePoolScope block_trampoline_pool(this);
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
   BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
@@ -1457,7 +1441,6 @@
 void Assembler::bltzalc(Register rt, int16_t offset) {
   DCHECK(kArchVariant == kMips64r6);
   DCHECK(!(rt.is(zero_reg)));
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
 }
 
@@ -1465,7 +1448,6 @@
 void Assembler::bgtzalc(Register rt, int16_t offset) {
   DCHECK(kArchVariant == kMips64r6);
   DCHECK(!(rt.is(zero_reg)));
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(BGTZ, zero_reg, rt, offset,
                     CompactBranchType::COMPACT_BRANCH);
 }
@@ -1474,7 +1456,6 @@
 void Assembler::beqzalc(Register rt, int16_t offset) {
   DCHECK(kArchVariant == kMips64r6);
   DCHECK(!(rt.is(zero_reg)));
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(ADDI, zero_reg, rt, offset,
                     CompactBranchType::COMPACT_BRANCH);
 }
@@ -1483,7 +1464,6 @@
 void Assembler::bnezalc(Register rt, int16_t offset) {
   DCHECK(kArchVariant == kMips64r6);
   DCHECK(!(rt.is(zero_reg)));
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(DADDI, zero_reg, rt, offset,
                     CompactBranchType::COMPACT_BRANCH);
 }
@@ -1549,7 +1529,6 @@
   uint64_t imm = jump_offset(target);
   if (target->is_bound()) {
     BlockTrampolinePoolScope block_trampoline_pool(this);
-    positions_recorder()->WriteRecordedPositions();
     GenInstrJump(static_cast<Opcode>(kJalRawMark),
                  static_cast<uint32_t>(imm >> 2) & kImm26Mask);
     BlockTrampolinePoolFor(1);  // For associated delay slot.
@@ -1562,9 +1541,6 @@
 void Assembler::jr(Register rs) {
   if (kArchVariant != kMips64r6) {
     BlockTrampolinePoolScope block_trampoline_pool(this);
-    if (rs.is(ra)) {
-      positions_recorder()->WriteRecordedPositions();
-    }
     GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
     BlockTrampolinePoolFor(1);  // For associated delay slot.
   } else {
@@ -1575,7 +1551,6 @@
 
 void Assembler::jal(int64_t target) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
-  positions_recorder()->WriteRecordedPositions();
   GenInstrJump(JAL, static_cast<uint32_t>(target >> 2) & kImm26Mask);
   BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
@@ -1584,7 +1559,6 @@
 void Assembler::jalr(Register rs, Register rd) {
   DCHECK(rs.code() != rd.code());
   BlockTrampolinePoolScope block_trampoline_pool(this);
-  positions_recorder()->WriteRecordedPositions();
   GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
   BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
@@ -1598,7 +1572,6 @@
 
 void Assembler::jialc(Register rt, int16_t offset) {
   DCHECK(kArchVariant == kMips64r6);
-  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(POP76, zero_reg, rt, offset);
 }
 
@@ -2545,6 +2518,30 @@
   GenInstrRegister(SPECIAL3, rs, rt, rd, sa, DBSHFL);
 }
 
+void Assembler::wsbh(Register rd, Register rt) {
+  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, WSBH, BSHFL);
+}
+
+void Assembler::dsbh(Register rd, Register rt) {
+  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, DSBH, DBSHFL);
+}
+
+void Assembler::dshd(Register rd, Register rt) {
+  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, DSHD, DBSHFL);
+}
+
+void Assembler::seh(Register rd, Register rt) {
+  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEH, BSHFL);
+}
+
+void Assembler::seb(Register rd, Register rt) {
+  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL);
+}
 
 // --------Coprocessor-instructions----------------
 
@@ -3452,7 +3449,6 @@
   }
 }
 
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/mips64/assembler-mips64.h b/src/mips64/assembler-mips64.h
index f93bc48..ff3611d 100644
--- a/src/mips64/assembler-mips64.h
+++ b/src/mips64/assembler-mips64.h
@@ -125,8 +125,6 @@
     return r;
   }
 
-  const char* ToString();
-  bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
   bool is(Register reg) const { return reg_code == reg.reg_code; }
   int code() const {
@@ -155,6 +153,8 @@
 
 Register ToRegister(int num);
 
+static const bool kSimpleFPAliasing = true;
+
 // Coprocessor register.
 struct FPURegister {
   enum Code {
@@ -173,8 +173,6 @@
   // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
   // number of Double regs (64-bit regs, or FPU-reg-pairs).
 
-  const char* ToString();
-  bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
   bool is(FPURegister reg) const { return reg_code == reg.reg_code; }
   FPURegister low() const {
@@ -907,6 +905,12 @@
   void align(Register rd, Register rs, Register rt, uint8_t bp);
   void dalign(Register rd, Register rs, Register rt, uint8_t bp);
 
+  void wsbh(Register rd, Register rt);
+  void dsbh(Register rd, Register rt);
+  void dshd(Register rd, Register rt);
+  void seh(Register rd, Register rt);
+  void seb(Register rd, Register rt);
+
   // --------Coprocessor-instructions----------------
 
   // Load, store, and move.
diff --git a/src/mips64/builtins-mips64.cc b/src/mips64/builtins-mips64.cc
index 7a0d81a..025093e 100644
--- a/src/mips64/builtins-mips64.cc
+++ b/src/mips64/builtins-mips64.cc
@@ -16,10 +16,7 @@
 
 #define __ ACCESS_MASM(masm)
 
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
-                                CFunctionId id,
-                                BuiltinExtraArguments extra_args) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
   // ----------- S t a t e -------------
   //  -- a0                 : number of arguments excluding receiver
   //  -- a1                 : target
@@ -38,23 +35,8 @@
   __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
 
   // Insert extra arguments.
-  int num_extra_args = 0;
-  switch (extra_args) {
-    case BuiltinExtraArguments::kTarget:
-      __ Push(a1);
-      ++num_extra_args;
-      break;
-    case BuiltinExtraArguments::kNewTarget:
-      __ Push(a3);
-      ++num_extra_args;
-      break;
-    case BuiltinExtraArguments::kTargetAndNewTarget:
-      __ Push(a1, a3);
-      num_extra_args += 2;
-      break;
-    case BuiltinExtraArguments::kNone:
-      break;
-  }
+  const int num_extra_args = 2;
+  __ Push(a1, a3);
 
   // JumpToExternalReference expects a0 to contain the number of arguments
   // including the receiver and the extra arguments.
@@ -144,6 +126,8 @@
 void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
   // ----------- S t a t e -------------
   //  -- a0                 : number of arguments
+  //  -- a1                 : function
+  //  -- cp                 : context
   //  -- ra                 : return address
   //  -- sp[(argc - n) * 8] : arg[n] (zero-based)
   //  -- sp[(argc + 1) * 8] : receiver
@@ -153,9 +137,9 @@
                                      : Heap::kMinusInfinityValueRootIndex;
 
   // Load the accumulator with the default return value (either -Infinity or
-  // +Infinity), with the tagged value in a1 and the double value in f0.
-  __ LoadRoot(a1, root_index);
-  __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+  // +Infinity), with the tagged value in t1 and the double value in f0.
+  __ LoadRoot(t1, root_index);
+  __ ldc1(f0, FieldMemOperand(t1, HeapNumber::kValueOffset));
   __ Addu(a3, a0, 1);
 
   Label done_loop, loop;
@@ -170,35 +154,39 @@
     __ ld(a2, MemOperand(at));
 
     // Load the double value of the parameter into f2, maybe converting the
-    // parameter to a number first using the ToNumberStub if necessary.
+    // parameter to a number first using the ToNumber builtin if necessary.
     Label convert, convert_smi, convert_number, done_convert;
     __ bind(&convert);
     __ JumpIfSmi(a2, &convert_smi);
     __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
     __ JumpIfRoot(a4, Heap::kHeapNumberMapRootIndex, &convert_number);
     {
-      // Parameter is not a Number, use the ToNumberStub to convert it.
-      FrameScope scope(masm, StackFrame::INTERNAL);
+      // Parameter is not a Number, use the ToNumber builtin to convert it.
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ Push(ra, fp);
+      __ Move(fp, sp);
+      __ Push(cp, a1);
       __ SmiTag(a0);
       __ SmiTag(a3);
-      __ Push(a0, a1, a3);
+      __ Push(a0, t1, a3);
       __ mov(a0, a2);
-      ToNumberStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
       __ mov(a2, v0);
-      __ Pop(a0, a1, a3);
+      __ Pop(a0, t1, a3);
       {
         // Restore the double accumulator value (f0).
         Label restore_smi, done_restore;
-        __ JumpIfSmi(a1, &restore_smi);
-        __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+        __ JumpIfSmi(t1, &restore_smi);
+        __ ldc1(f0, FieldMemOperand(t1, HeapNumber::kValueOffset));
         __ jmp(&done_restore);
         __ bind(&restore_smi);
-        __ SmiToDoubleFPURegister(a1, f0, a4);
+        __ SmiToDoubleFPURegister(t1, f0, a4);
         __ bind(&done_restore);
       }
       __ SmiUntag(a3);
       __ SmiUntag(a0);
+      __ Pop(cp, a1);
+      __ Pop(ra, fp);
     }
     __ jmp(&convert);
     __ bind(&convert_number);
@@ -223,20 +211,20 @@
     }
     __ Move(at, f0);
     __ Branch(&loop, eq, a4, Operand(at));
-    __ mov(a1, a2);
+    __ mov(t1, a2);
     __ jmp(&loop);
 
     // At least one side is NaN, which means that the result will be NaN too.
     __ bind(&compare_nan);
-    __ LoadRoot(a1, Heap::kNanValueRootIndex);
-    __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+    __ LoadRoot(t1, Heap::kNanValueRootIndex);
+    __ ldc1(f0, FieldMemOperand(t1, HeapNumber::kValueOffset));
     __ jmp(&loop);
   }
 
   __ bind(&done_loop);
   __ Dlsa(sp, sp, a3, kPointerSizeLog2);
   __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a1);  // In delay slot.
+  __ mov(v0, t1);  // In delay slot.
 }
 
 // static
@@ -261,8 +249,7 @@
   }
 
   // 2a. Convert first argument to number.
-  ToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
+  __ Jump(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
 
   // 2b. No arguments, return +0.
   __ bind(&no_arguments);
@@ -309,8 +296,7 @@
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
       __ Push(a1, a3);
-      ToNumberStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
       __ Move(a0, v0);
       __ Pop(a1, a3);
     }
@@ -694,8 +680,8 @@
   __ AssertGeneratorObject(a1);
 
   // Store input value into generator object.
-  __ sd(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOffset));
-  __ RecordWriteField(a1, JSGeneratorObject::kInputOffset, v0, a3,
+  __ sd(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
+  __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
                       kRAHasNotBeenSaved, kDontSaveFPRegs);
 
   // Store resume mode into generator object.
@@ -706,20 +692,22 @@
   __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
 
   // Flood function if we are stepping.
-  Label skip_flooding;
-  ExternalReference step_in_enabled =
-      ExternalReference::debug_step_in_enabled_address(masm->isolate());
-  __ li(t1, Operand(step_in_enabled));
-  __ lb(t1, MemOperand(t1));
-  __ Branch(&skip_flooding, eq, t1, Operand(zero_reg));
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Push(a1, a2, a4);
-    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
-    __ Pop(a1, a2);
-    __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
-  }
-  __ bind(&skip_flooding);
+  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+  Label stepping_prepared;
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(masm->isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  __ li(a5, Operand(last_step_action));
+  __ lb(a5, MemOperand(a5));
+  __ Branch(&prepare_step_in_if_stepping, ge, a5, Operand(StepIn));
+
+  // Flood function if we need to continue stepping in the suspended generator.
+  ExternalReference debug_suspended_generator =
+      ExternalReference::debug_suspended_generator_address(masm->isolate());
+  __ li(a5, Operand(debug_suspended_generator));
+  __ ld(a5, MemOperand(a5));
+  __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5));
+  __ bind(&stepping_prepared);
 
   // Push receiver.
   __ ld(a5, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
@@ -763,7 +751,6 @@
     __ ld(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
     __ lw(a0,
          FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
-    __ SmiUntag(a0);
     // We abuse new.target both to indicate that this is a resume call and to
     // pass in the generator object.  In ordinary calls, new.target is always
     // undefined because generator functions are non-constructable.
@@ -816,6 +803,26 @@
     __ Move(v0, a1);  // Continuation expects generator object in v0.
     __ Jump(a3);
   }
+
+  __ bind(&prepare_step_in_if_stepping);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(a1, a2, a4);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(a1, a2);
+  }
+  __ Branch(USE_DELAY_SLOT, &stepping_prepared);
+  __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+
+  __ bind(&prepare_step_in_suspended_generator);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(a1, a2);
+    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+    __ Pop(a1, a2);
+  }
+  __ Branch(USE_DELAY_SLOT, &stepping_prepared);
+  __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
 }
 
 void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
@@ -940,6 +947,21 @@
   Generate_JSEntryTrampolineHelper(masm, true);
 }
 
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
+  Register args_count = scratch;
+
+  // Get the arguments + receiver count.
+  __ ld(args_count,
+        MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ lw(t0, FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+  // Drop receiver + arguments.
+  __ Daddu(sp, sp, args_count);
+}
+
 // Generate code for entering a JS function with the interpreter.
 // On entry to the function the receiver and arguments have been pushed on the
 // stack left to right.  The actual argument count matches the formal parameter
@@ -1042,16 +1064,7 @@
   masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
 
   // The return value is in v0.
-
-  // Get the arguments + reciever count.
-  __ ld(t0, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ lw(t0, FieldMemOperand(t0, BytecodeArray::kParameterSizeOffset));
-
-  // Leave the frame (also dropping the register file).
-  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
-
-  // Drop receiver + arguments and return.
-  __ Daddu(sp, sp, t0);
+  LeaveInterpreterFrame(masm, t0);
   __ Jump(ra);
 
   // Load debug copy of the bytecode array.
@@ -1073,6 +1086,31 @@
   __ Jump(a4);
 }
 
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+  // Save the function and context for call to CompileBaseline.
+  __ ld(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+  __ ld(kContextRegister,
+        MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  // Leave the frame before recompiling for baseline so that we don't count as
+  // an activation on the stack.
+  LeaveInterpreterFrame(masm, t0);
+
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Push return value.
+    __ push(v0);
+
+    // Push function as argument and compile for baseline.
+    __ push(a1);
+    __ CallRuntime(Runtime::kCompileBaseline);
+
+    // Restore return value.
+    __ pop(v0);
+  }
+  __ Jump(ra);
+}
+
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode) {
@@ -1229,13 +1267,27 @@
                               SharedFunctionInfo::kOffsetToPreviousOsrAstId));
   const int bailout_id = BailoutId::None().ToInt();
   __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
+
   // Literals available?
+  Label got_literals, maybe_cleared_weakcell;
   __ ld(temp, FieldMemOperand(array_pointer,
                               SharedFunctionInfo::kOffsetToPreviousLiterals));
+  // temp contains either a WeakCell pointing to the literals array or the
+  // literals array directly.
+  __ ld(a4, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(a4, &maybe_cleared_weakcell);
+  // a4 is a pointer, therefore temp is a WeakCell pointing to a literals array.
   __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
-  __ JumpIfSmi(temp, &gotta_call_runtime);
+  __ jmp(&got_literals);
+
+  // a4 is a smi. If it's 0, then we are looking at a cleared WeakCell
+  // around the literals array, and we should visit the runtime. If it's > 0,
+  // then temp already contains the literals array.
+  __ bind(&maybe_cleared_weakcell);
+  __ Branch(&gotta_call_runtime, eq, a4, Operand(Smi::FromInt(0)));
 
   // Save the literals in the closure.
+  __ bind(&got_literals);
   __ ld(a4, MemOperand(sp, 0));
   __ sd(temp, FieldMemOperand(a4, JSFunction::kLiteralsOffset));
   __ push(index);
@@ -1650,6 +1702,9 @@
 void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
                                                int field_index) {
   // ----------- S t a t e -------------
+  //  -- a0                 : number of arguments
+  //  -- a1                 : function
+  //  -- cp                 : context
   //  -- sp[0] : receiver
   // -----------------------------------
 
@@ -1689,7 +1744,14 @@
 
   // 3. Raise a TypeError if the receiver is not a date.
   __ bind(&receiver_not_date);
-  __ TailCallRuntime(Runtime::kThrowNotDateError);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Push(a0, ra, fp);
+    __ Move(fp, sp);
+    __ Push(cp, a1);
+    __ Push(Smi::FromInt(0));
+    __ CallRuntime(Runtime::kThrowNotDateError);
+  }
 }
 
 // static
@@ -2686,6 +2748,83 @@
   __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
 }
 
+// static
+void Builtins::Generate_StringToNumber(MacroAssembler* masm) {
+  // The StringToNumber stub takes on argument in a0.
+  __ AssertString(a0);
+
+  // Check if string has a cached array index.
+  Label runtime;
+  __ lwu(a2, FieldMemOperand(a0, String::kHashFieldOffset));
+  __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
+  __ Branch(&runtime, ne, at, Operand(zero_reg));
+  __ IndexFromHash(a2, v0);
+  __ Ret();
+
+  __ bind(&runtime);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    // Push argument.
+    __ Push(a0);
+    // We cannot use a tail call here because this builtin can also be called
+    // from wasm.
+    __ CallRuntime(Runtime::kStringToNumber);
+  }
+  __ Ret();
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in a0.
+  Label not_smi;
+  __ JumpIfNotSmi(a0, &not_smi);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+  __ bind(&not_smi);
+
+  Label not_heap_number;
+  __ GetObjectType(a0, a1, a1);
+  // a0: receiver
+  // a1: receiver instance type
+  __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+  __ bind(&not_heap_number);
+
+  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+          RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_NonNumberToNumber(MacroAssembler* masm) {
+  // The NonNumberToNumber stub takes on argument in a0.
+  __ AssertNotNumber(a0);
+
+  Label not_string;
+  __ GetObjectType(a0, a1, a1);
+  // a0: receiver
+  // a1: receiver instance type
+  __ Branch(&not_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
+  __ Jump(masm->isolate()->builtins()->StringToNumber(),
+          RelocInfo::CODE_TARGET);
+  __ bind(&not_string);
+
+  Label not_oddball;
+  __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
+  __ Ret(USE_DELAY_SLOT);
+  __ ld(v0, FieldMemOperand(a0, Oddball::kToNumberOffset));  // In delay slot.
+  __ bind(&not_oddball);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    // Push argument.
+    __ Push(a0);
+    // We cannot use a tail call here because this builtin can also be called
+    // from wasm.
+    __ CallRuntime(Runtime::kToNumber);
+  }
+  __ Ret();
+}
+
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // State setup as expected by MacroAssembler::InvokePrologue.
   // ----------- S t a t e -------------
diff --git a/src/mips64/code-stubs-mips64.cc b/src/mips64/code-stubs-mips64.cc
index 5702c78..89eff90 100644
--- a/src/mips64/code-stubs-mips64.cc
+++ b/src/mips64/code-stubs-mips64.cc
@@ -20,70 +20,29 @@
 namespace v8 {
 namespace internal {
 
+#define __ ACCESS_MASM(masm)
 
-static void InitializeArrayConstructorDescriptor(
-    Isolate* isolate, CodeStubDescriptor* descriptor,
-    int constant_stack_parameter_count) {
-  Address deopt_handler = Runtime::FunctionForId(
-      Runtime::kArrayConstructor)->entry;
-
-  if (constant_stack_parameter_count == 0) {
-    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  } else {
-    descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  }
+void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
+  __ dsll(t9, a0, kPointerSizeLog2);
+  __ Daddu(t9, sp, t9);
+  __ sd(a1, MemOperand(t9, 0));
+  __ Push(a1);
+  __ Push(a2);
+  __ Daddu(a0, a0, 3);
+  __ TailCallRuntime(Runtime::kNewArray);
 }
 
-
-static void InitializeInternalArrayConstructorDescriptor(
-    Isolate* isolate, CodeStubDescriptor* descriptor,
-    int constant_stack_parameter_count) {
-  Address deopt_handler = Runtime::FunctionForId(
-      Runtime::kInternalArrayConstructor)->entry;
-
-  if (constant_stack_parameter_count == 0) {
-    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  } else {
-    descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  }
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
   descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
 }
 
-void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+void FastFunctionBindStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
+  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
+  descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
 }
 
-
-void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                           Condition cc);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
@@ -1019,7 +978,7 @@
   CEntryStub::GenerateAheadOfTime(isolate);
   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
-  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
   CreateWeakCellStub::GenerateAheadOfTime(isolate);
   BinaryOpICStub::GenerateAheadOfTime(isolate);
@@ -1436,7 +1395,6 @@
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
                                           &miss,  // When index out of range.
-                                          STRING_INDEX_IS_ARRAY_INDEX,
                                           RECEIVER_IS_STRING);
   char_at_generator.GenerateFast(masm);
   __ Ret();
@@ -1923,6 +1881,7 @@
   // a2 : feedback vector
   // a3 : slot in feedback vector (Smi)
   Label initialize, done, miss, megamorphic, not_array_function;
+  Label done_initialize_count, done_increment_count;
 
   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
@@ -1942,7 +1901,7 @@
   Register feedback_map = a6;
   Register weak_value = t0;
   __ ld(weak_value, FieldMemOperand(a5, WeakCell::kValueOffset));
-  __ Branch(&done, eq, a1, Operand(weak_value));
+  __ Branch(&done_increment_count, eq, a1, Operand(weak_value));
   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   __ Branch(&done, eq, a5, Operand(at));
   __ ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset));
@@ -1964,7 +1923,7 @@
   // Make sure the function is the Array() function
   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
   __ Branch(&megamorphic, ne, a1, Operand(a5));
-  __ jmp(&done);
+  __ jmp(&done_increment_count);
 
   __ bind(&miss);
 
@@ -1992,12 +1951,31 @@
   // slot.
   CreateAllocationSiteStub create_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &create_stub);
-  __ Branch(&done);
+  __ Branch(&done_initialize_count);
 
   __ bind(&not_array_function);
 
   CreateWeakCellStub weak_cell_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &weak_cell_stub);
+
+  __ bind(&done_initialize_count);
+  // Initialize the call counter.
+
+  __ SmiScale(a4, a3, kPointerSizeLog2);
+  __ Daddu(a4, a2, Operand(a4));
+  __ li(a5, Operand(Smi::FromInt(1)));
+  __ Branch(USE_DELAY_SLOT, &done);
+  __ sd(a5, FieldMemOperand(a4, FixedArray::kHeaderSize + kPointerSize));
+
+  __ bind(&done_increment_count);
+
+  // Increment the call count for monomorphic function calls.
+  __ SmiScale(a4, a3, kPointerSizeLog2);
+  __ Daddu(a5, a2, Operand(a4));
+  __ ld(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
+  __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
+  __ sd(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
+
   __ bind(&done);
 }
 
@@ -2100,7 +2078,7 @@
   __ dsrl(t0, a3, 32 - kPointerSizeLog2);
   __ Daddu(a3, a2, Operand(t0));
   __ ld(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
-  __ Daddu(t0, t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+  __ Daddu(t0, t0, Operand(Smi::FromInt(1)));
   __ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
 
   __ mov(a2, a4);
@@ -2148,7 +2126,7 @@
   __ dsrl(t0, a3, 32 - kPointerSizeLog2);
   __ Daddu(a3, a2, Operand(t0));
   __ ld(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
-  __ Daddu(t0, t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+  __ Daddu(t0, t0, Operand(Smi::FromInt(1)));
   __ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
 
   __ bind(&call_function);
@@ -2221,7 +2199,7 @@
   // Initialize the call counter.
   __ dsrl(at, a3, 32 - kPointerSizeLog2);
   __ Daddu(at, a2, Operand(at));
-  __ li(t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+  __ li(t0, Operand(Smi::FromInt(1)));
   __ sd(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
 
   // Store the function. Use a stub since we need a frame for allocation.
@@ -2282,13 +2260,7 @@
   } else {
     __ Push(object_, index_);
   }
-  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
-    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
-  } else {
-    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
-    // NumberToSmi discards numbers that are not exact integers.
-    __ CallRuntime(Runtime::kNumberToSmi);
-  }
+  __ CallRuntime(Runtime::kNumberToSmi);
 
   // Save the conversion result before the pop instructions below
   // have a chance to overwrite it.
@@ -2625,74 +2597,12 @@
   // a3: from index (untagged)
   __ SmiTag(a3);
   StringCharAtGenerator generator(v0, a3, a2, v0, &runtime, &runtime, &runtime,
-                                  STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
+                                  RECEIVER_IS_STRING);
   generator.GenerateFast(masm);
   __ DropAndRet(3);
   generator.SkipSlow(masm, &runtime);
 }
 
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in a0.
-  Label not_smi;
-  __ JumpIfNotSmi(a0, &not_smi);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-  __ bind(&not_smi);
-
-  Label not_heap_number;
-  __ GetObjectType(a0, a1, a1);
-  // a0: receiver
-  // a1: receiver instance type
-  __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-  __ bind(&not_heap_number);
-
-  NonNumberToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-}
-
-void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
-  // The NonNumberToNumber stub takes on argument in a0.
-  __ AssertNotNumber(a0);
-
-  Label not_string;
-  __ GetObjectType(a0, a1, a1);
-  // a0: receiver
-  // a1: receiver instance type
-  __ Branch(&not_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
-  StringToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_string);
-
-  Label not_oddball;
-  __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
-  __ Ret(USE_DELAY_SLOT);
-  __ ld(v0, FieldMemOperand(a0, Oddball::kToNumberOffset));  // In delay slot.
-  __ bind(&not_oddball);
-
-  __ Push(a0);  // Push argument.
-  __ TailCallRuntime(Runtime::kToNumber);
-}
-
-void StringToNumberStub::Generate(MacroAssembler* masm) {
-  // The StringToNumber stub takes on argument in a0.
-  __ AssertString(a0);
-
-  // Check if string has a cached array index.
-  Label runtime;
-  __ lwu(a2, FieldMemOperand(a0, String::kHashFieldOffset));
-  __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
-  __ Branch(&runtime, ne, at, Operand(zero_reg));
-  __ IndexFromHash(a2, v0);
-  __ Ret();
-
-  __ bind(&runtime);
-  __ Push(a0);  // Push argument.
-  __ TailCallRuntime(Runtime::kStringToNumber);
-}
-
 void ToStringStub::Generate(MacroAssembler* masm) {
   // The ToString stub takes on argument in a0.
   Label is_number;
@@ -2873,7 +2783,7 @@
   // Load a2 with the allocation site. We stick an undefined dummy value here
   // and replace it with the real allocation site later when we instantiate this
   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
-  __ li(a2, handle(isolate()->heap()->undefined_value()));
+  __ li(a2, isolate()->factory()->undefined_value());
 
   // Make sure that we actually patched the allocation site.
   if (FLAG_debug_code) {
@@ -3768,14 +3678,14 @@
 
 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  LoadICStub stub(isolate(), state());
+  LoadICStub stub(isolate());
   stub.GenerateForTrampoline(masm);
 }
 
 
 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  KeyedLoadICStub stub(isolate(), state());
+  KeyedLoadICStub stub(isolate());
   stub.GenerateForTrampoline(masm);
 }
 
@@ -4394,19 +4304,13 @@
   }
 }
 
-
-void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
       isolate);
   ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
       isolate);
-  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
-      isolate);
-}
-
-
-void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
-    Isolate* isolate) {
+  ArrayNArgumentsConstructorStub stub(isolate);
+  stub.GetCode();
   ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
   for (int i = 0; i < 2; i++) {
     // For internal arrays we only need a few things.
@@ -4414,8 +4318,6 @@
     stubh1.GetCode();
     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
     stubh2.GetCode();
-    InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
-    stubh3.GetCode();
   }
 }
 
@@ -4434,13 +4336,15 @@
     CreateArrayDispatchOneArgument(masm, mode);
 
     __ bind(&not_one_case);
-    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+    ArrayNArgumentsConstructorStub stub(masm->isolate());
+    __ TailCallStub(&stub);
   } else if (argument_count() == NONE) {
     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   } else if (argument_count() == ONE) {
     CreateArrayDispatchOneArgument(masm, mode);
   } else if (argument_count() == MORE_THAN_ONE) {
-    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+    ArrayNArgumentsConstructorStub stub(masm->isolate());
+    __ TailCallStub(&stub);
   } else {
     UNREACHABLE();
   }
@@ -4524,7 +4428,7 @@
   InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
   __ TailCallStub(&stub0, lo, a0, Operand(1));
 
-  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+  ArrayNArgumentsConstructorStub stubN(isolate());
   __ TailCallStub(&stubN, hi, a0, Operand(1));
 
   if (IsFastPackedElementsKind(kind)) {
@@ -4750,10 +4654,10 @@
   Label rest_parameters;
   __ SmiLoadUntag(
       a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(a1,
-        FieldMemOperand(a1, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ Dsubu(a0, a0, Operand(a1));
+  __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a3,
+        FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ Dsubu(a0, a0, Operand(a3));
   __ Branch(&rest_parameters, gt, a0, Operand(zero_reg));
 
   // Return an empty rest parameter array.
@@ -4800,15 +4704,16 @@
     // ----------- S t a t e -------------
     //  -- cp : context
     //  -- a0 : number of rest parameters
+    //  -- a1 : function
     //  -- a2 : pointer to first rest parameters
     //  -- ra : return address
     // -----------------------------------
 
     // Allocate space for the rest parameter array plus the backing store.
     Label allocate, done_allocate;
-    __ li(a1, Operand(JSArray::kSize + FixedArray::kHeaderSize));
-    __ Dlsa(a1, a1, a0, kPointerSizeLog2);
-    __ Allocate(a1, v0, a3, a4, &allocate, NO_ALLOCATION_FLAGS);
+    __ li(a5, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+    __ Dlsa(a5, a5, a0, kPointerSizeLog2);
+    __ Allocate(a5, v0, a3, a4, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Compute arguments.length in a4.
@@ -4843,18 +4748,26 @@
     __ Ret(USE_DELAY_SLOT);
     __ mov(v0, a3);  // In delay slot
 
-    // Fall back to %AllocateInNewSpace.
+    // Fall back to %AllocateInNewSpace (if not too big).
+    Label too_big_for_new_space;
     __ bind(&allocate);
+    __ Branch(&too_big_for_new_space, gt, a5,
+              Operand(Page::kMaxRegularHeapObjectSize));
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
       __ SmiTag(a0);
-      __ SmiTag(a1);
-      __ Push(a0, a2, a1);
+      __ SmiTag(a5);
+      __ Push(a0, a2, a5);
       __ CallRuntime(Runtime::kAllocateInNewSpace);
       __ Pop(a0, a2);
       __ SmiUntag(a0);
     }
     __ jmp(&done_allocate);
+
+    // Fall back to %NewStrictArguments.
+    __ bind(&too_big_for_new_space);
+    __ Push(a1);
+    __ TailCallRuntime(Runtime::kNewStrictArguments);
   }
 }
 
@@ -5126,9 +5039,9 @@
   __ Branch(&arguments_adaptor, eq, a0,
             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   {
-    __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+    __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
     __ lw(a0,
-          FieldMemOperand(a1, SharedFunctionInfo::kFormalParameterCountOffset));
+          FieldMemOperand(a4, SharedFunctionInfo::kFormalParameterCountOffset));
     __ Dlsa(a2, a2, a0, kPointerSizeLog2);
     __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
                              1 * kPointerSize));
@@ -5147,15 +5060,16 @@
   // ----------- S t a t e -------------
   //  -- cp : context
   //  -- a0 : number of rest parameters
+  //  -- a1 : function
   //  -- a2 : pointer to first rest parameters
   //  -- ra : return address
   // -----------------------------------
 
   // Allocate space for the rest parameter array plus the backing store.
   Label allocate, done_allocate;
-  __ li(a1, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
-  __ Dlsa(a1, a1, a0, kPointerSizeLog2);
-  __ Allocate(a1, v0, a3, a4, &allocate, NO_ALLOCATION_FLAGS);
+  __ li(a5, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+  __ Dlsa(a5, a5, a0, kPointerSizeLog2);
+  __ Allocate(a5, v0, a3, a4, &allocate, NO_ALLOCATION_FLAGS);
   __ bind(&done_allocate);
 
   // Compute arguments.length in a4.
@@ -5190,48 +5104,26 @@
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a3);  // In delay slot
 
-  // Fall back to %AllocateInNewSpace.
+  // Fall back to %AllocateInNewSpace (if not too big).
+  Label too_big_for_new_space;
   __ bind(&allocate);
+  __ Branch(&too_big_for_new_space, gt, a5,
+            Operand(Page::kMaxRegularHeapObjectSize));
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ SmiTag(a0);
-    __ SmiTag(a1);
-    __ Push(a0, a2, a1);
+    __ SmiTag(a5);
+    __ Push(a0, a2, a5);
     __ CallRuntime(Runtime::kAllocateInNewSpace);
     __ Pop(a0, a2);
     __ SmiUntag(a0);
   }
   __ jmp(&done_allocate);
-}
 
-
-void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
-  Register context_reg = cp;
-  Register slot_reg = a2;
-  Register result_reg = v0;
-  Label slow_case;
-
-  // Go up context chain to the script context.
-  for (int i = 0; i < depth(); ++i) {
-    __ ld(result_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
-    context_reg = result_reg;
-  }
-
-  // Load the PropertyCell value at the specified slot.
-  __ Dlsa(at, context_reg, slot_reg, kPointerSizeLog2);
-  __ ld(result_reg, ContextMemOperand(at, 0));
-  __ ld(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));
-
-  // Check that value is not the_hole.
-  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-  __ Branch(&slow_case, eq, result_reg, Operand(at));
-  __ Ret();
-
-  // Fallback to the runtime.
-  __ bind(&slow_case);
-  __ SmiTag(slot_reg);
-  __ Push(slot_reg);
-  __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+  // Fall back to %NewStrictArguments.
+  __ bind(&too_big_for_new_space);
+  __ Push(a1);
+  __ TailCallRuntime(Runtime::kNewStrictArguments);
 }
 
 
diff --git a/src/mips64/codegen-mips64.cc b/src/mips64/codegen-mips64.cc
index 678f606..a8f5890 100644
--- a/src/mips64/codegen-mips64.cc
+++ b/src/mips64/codegen-mips64.cc
@@ -17,59 +17,6 @@
 #define __ masm.
 
 
-#if defined(USE_SIMULATOR)
-byte* fast_exp_mips_machine_code = nullptr;
-double fast_exp_simulator(double x, Isolate* isolate) {
-  return Simulator::current(isolate)->CallFP(fast_exp_mips_machine_code, x, 0);
-}
-#endif
-
-
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
-  size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
-  if (buffer == nullptr) return nullptr;
-  ExternalReference::InitializeMathExpData();
-
-  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
-                      CodeObjectRequired::kNo);
-
-  {
-    DoubleRegister input = f12;
-    DoubleRegister result = f0;
-    DoubleRegister double_scratch1 = f4;
-    DoubleRegister double_scratch2 = f6;
-    Register temp1 = a4;
-    Register temp2 = a5;
-    Register temp3 = a6;
-
-    __ MovFromFloatParameter(input);
-    __ Push(temp3, temp2, temp1);
-    MathExpGenerator::EmitMathExp(
-        &masm, input, result, double_scratch1, double_scratch2,
-        temp1, temp2, temp3);
-    __ Pop(temp3, temp2, temp1);
-    __ MovToFloatResult(result);
-    __ Ret();
-  }
-
-  CodeDesc desc;
-  masm.GetCode(&desc);
-  DCHECK(!RelocInfo::RequiresRelocation(desc));
-
-  Assembler::FlushICache(isolate, buffer, actual_size);
-  base::OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
-  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
-#else
-  fast_exp_mips_machine_code = buffer;
-  return &fast_exp_simulator;
-#endif
-}
-
-
 #if defined(V8_HOST_ARCH_MIPS)
 MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
                                                 MemCopyUint8Function stub) {
@@ -1090,94 +1037,6 @@
   __ bind(&done);
 }
 
-
-static MemOperand ExpConstant(int index, Register base) {
-  return MemOperand(base, index * kDoubleSize);
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
-                                   DoubleRegister input,
-                                   DoubleRegister result,
-                                   DoubleRegister double_scratch1,
-                                   DoubleRegister double_scratch2,
-                                   Register temp1,
-                                   Register temp2,
-                                   Register temp3) {
-  DCHECK(!input.is(result));
-  DCHECK(!input.is(double_scratch1));
-  DCHECK(!input.is(double_scratch2));
-  DCHECK(!result.is(double_scratch1));
-  DCHECK(!result.is(double_scratch2));
-  DCHECK(!double_scratch1.is(double_scratch2));
-  DCHECK(!temp1.is(temp2));
-  DCHECK(!temp1.is(temp3));
-  DCHECK(!temp2.is(temp3));
-  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
-  DCHECK(!masm->serializer_enabled());  // External references not serializable.
-
-  Label zero, infinity, done;
-  __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
-
-  __ ldc1(double_scratch1, ExpConstant(0, temp3));
-  __ BranchF(&zero, NULL, ge, double_scratch1, input);
-
-  __ ldc1(double_scratch2, ExpConstant(1, temp3));
-  __ BranchF(&infinity, NULL, ge, input, double_scratch2);
-
-  __ ldc1(double_scratch1, ExpConstant(3, temp3));
-  __ ldc1(result, ExpConstant(4, temp3));
-  __ mul_d(double_scratch1, double_scratch1, input);
-  __ add_d(double_scratch1, double_scratch1, result);
-  __ FmoveLow(temp2, double_scratch1);
-  __ sub_d(double_scratch1, double_scratch1, result);
-  __ ldc1(result, ExpConstant(6, temp3));
-  __ ldc1(double_scratch2, ExpConstant(5, temp3));
-  __ mul_d(double_scratch1, double_scratch1, double_scratch2);
-  __ sub_d(double_scratch1, double_scratch1, input);
-  __ sub_d(result, result, double_scratch1);
-  __ mul_d(double_scratch2, double_scratch1, double_scratch1);
-  __ mul_d(result, result, double_scratch2);
-  __ ldc1(double_scratch2, ExpConstant(7, temp3));
-  __ mul_d(result, result, double_scratch2);
-  __ sub_d(result, result, double_scratch1);
-  // Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1.
-  DCHECK(*reinterpret_cast<double*>
-         (ExternalReference::math_exp_constants(8).address()) == 1);
-  __ Move(double_scratch2, 1.);
-  __ add_d(result, result, double_scratch2);
-  __ dsrl(temp1, temp2, 11);
-  __ Ext(temp2, temp2, 0, 11);
-  __ Daddu(temp1, temp1, Operand(0x3ff));
-
-  // Must not call ExpConstant() after overwriting temp3!
-  __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
-  __ Dlsa(temp3, temp3, temp2, 3);
-  __ lwu(temp2, MemOperand(temp3, Register::kMantissaOffset));
-  __ lwu(temp3, MemOperand(temp3, Register::kExponentOffset));
-  // The first word is loaded is the lower number register.
-  if (temp2.code() < temp3.code()) {
-    __ dsll(at, temp1, 20);
-    __ Or(temp1, temp3, at);
-    __ Move(double_scratch1, temp2, temp1);
-  } else {
-    __ dsll(at, temp1, 20);
-    __ Or(temp1, temp2, at);
-    __ Move(double_scratch1, temp3, temp1);
-  }
-  __ mul_d(result, result, double_scratch1);
-  __ BranchShort(&done);
-
-  __ bind(&zero);
-  __ Move(result, kDoubleRegZero);
-  __ BranchShort(&done);
-
-  __ bind(&infinity);
-  __ ldc1(result, ExpConstant(2, temp3));
-
-  __ bind(&done);
-}
-
 #ifdef DEBUG
 // nop(CODE_AGE_MARKER_NOP)
 static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
diff --git a/src/mips64/codegen-mips64.h b/src/mips64/codegen-mips64.h
index ad7abb3..a4f8184 100644
--- a/src/mips64/codegen-mips64.h
+++ b/src/mips64/codegen-mips64.h
@@ -29,23 +29,6 @@
   DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
 };
 
-
-class MathExpGenerator : public AllStatic {
- public:
-  // Register input isn't modified. All other registers are clobbered.
-  static void EmitMathExp(MacroAssembler* masm,
-                          DoubleRegister input,
-                          DoubleRegister result,
-                          DoubleRegister double_scratch1,
-                          DoubleRegister double_scratch2,
-                          Register temp1,
-                          Register temp2,
-                          Register temp3);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/mips64/constants-mips64.h b/src/mips64/constants-mips64.h
index 8272420..d2b1e92 100644
--- a/src/mips64/constants-mips64.h
+++ b/src/mips64/constants-mips64.h
@@ -1237,11 +1237,10 @@
           int sa = SaFieldRaw() >> kSaShift;
           switch (sa) {
             case BITSWAP:
-              return kRegisterType;
             case WSBH:
             case SEB:
             case SEH:
-              return kUnsupported;
+              return kRegisterType;
           }
           sa >>= kBp2Bits;
           switch (sa) {
@@ -1255,10 +1254,9 @@
           int sa = SaFieldRaw() >> kSaShift;
           switch (sa) {
             case DBITSWAP:
-              return kRegisterType;
             case DSBH:
             case DSHD:
-              return kUnsupported;
+              return kRegisterType;
           }
           sa = SaFieldRaw() >> kSaShift;
           sa >>= kBp3Bits;
diff --git a/src/mips64/deoptimizer-mips64.cc b/src/mips64/deoptimizer-mips64.cc
index 90bd11e..ea17124 100644
--- a/src/mips64/deoptimizer-mips64.cc
+++ b/src/mips64/deoptimizer-mips64.cc
@@ -117,8 +117,7 @@
 
   // Save all FPU registers before messing with them.
   __ Dsubu(sp, sp, Operand(kDoubleRegsSize));
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
     int code = config->GetAllocatableDoubleCode(i);
     const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
diff --git a/src/mips64/disasm-mips64.cc b/src/mips64/disasm-mips64.cc
index 7b05493..1917526 100644
--- a/src/mips64/disasm-mips64.cc
+++ b/src/mips64/disasm-mips64.cc
@@ -1457,11 +1457,18 @@
           Format(instr, "bitswap 'rd, 'rt");
           break;
         }
-        case SEB:
-        case SEH:
-        case WSBH:
-          UNREACHABLE();
+        case SEB: {
+          Format(instr, "seb     'rd, 'rt");
           break;
+        }
+        case SEH: {
+          Format(instr, "seh     'rd, 'rt");
+          break;
+        }
+        case WSBH: {
+          Format(instr, "wsbh    'rd, 'rt");
+          break;
+        }
         default: {
           sa >>= kBp2Bits;
           switch (sa) {
@@ -1492,10 +1499,14 @@
           }
           break;
         }
-        case DSBH:
-        case DSHD:
-          UNREACHABLE();
+        case DSBH: {
+          Format(instr, "dsbh    'rd, 'rt");
           break;
+        }
+        case DSHD: {
+          Format(instr, "dshd    'rd, 'rt");
+          break;
+        }
         default: {
           sa >>= kBp3Bits;
           switch (sa) {
@@ -1938,7 +1949,7 @@
 namespace disasm {
 
 const char* NameConverter::NameOfAddress(byte* addr) const {
-  v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+  v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
   return tmp_buffer_.start();
 }
 
@@ -2001,8 +2012,8 @@
     buffer[0] = '\0';
     byte* prev_pc = pc;
     pc += d.InstructionDecode(buffer, pc);
-    v8::internal::PrintF(f, "%p    %08x      %s\n",
-        prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+    v8::internal::PrintF(f, "%p    %08x      %s\n", static_cast<void*>(prev_pc),
+                         *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
   }
 }
 
diff --git a/src/mips64/interface-descriptors-mips64.cc b/src/mips64/interface-descriptors-mips64.cc
index 67bae36..684dca5 100644
--- a/src/mips64/interface-descriptors-mips64.cc
+++ b/src/mips64/interface-descriptors-mips64.cc
@@ -11,6 +11,14 @@
 
 const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
 
+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
+    CallInterfaceDescriptorData* data, int register_parameter_count) {
+  const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
+  CHECK_LE(static_cast<size_t>(register_parameter_count),
+           arraysize(default_stub_registers));
+  data->InitializePlatformSpecific(register_parameter_count,
+                                   default_stub_registers);
+}
 
 const Register LoadDescriptor::ReceiverRegister() { return a1; }
 const Register LoadDescriptor::NameRegister() { return a2; }
@@ -39,9 +47,6 @@
 const Register StoreTransitionDescriptor::MapRegister() { return a3; }
 
 
-const Register LoadGlobalViaContextDescriptor::SlotRegister() { return a2; }
-
-
 const Register StoreGlobalViaContextDescriptor::SlotRegister() { return a2; }
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return a0; }
 
@@ -63,8 +68,6 @@
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
 
-const Register HasPropertyDescriptor::ObjectRegister() { return a0; }
-const Register HasPropertyDescriptor::KeyRegister() { return a3; }
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -254,43 +257,24 @@
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
   // a0 -- number of arguments
   // a1 -- function
   // a2 -- allocation site with elements kind
-  Register registers[] = {a1, a2};
+  Register registers[] = {a1, a2, a0};
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // stack param count needs (constructor pointer, and single argument)
   Register registers[] = {a1, a2, a0};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void InternalArrayConstructorConstantArgCountDescriptor::
-    InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
-  // register state
-  // a0 -- number of arguments
-  // a1 -- constructor function
-  Register registers[] = {a1};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // stack param count needs (constructor pointer, and single argument)
-  Register registers[] = {a1, a0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastArrayPushDescriptor::InitializePlatformSpecific(
+void VarArgFunctionDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // stack param count needs (arg count)
   Register registers[] = {a0};
diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc
index b7b4f28..f8e7e1f 100644
--- a/src/mips64/macro-assembler-mips64.cc
+++ b/src/mips64/macro-assembler-mips64.cc
@@ -17,6 +17,18 @@
 namespace v8 {
 namespace internal {
 
+// Floating point constants.
+const uint64_t kDoubleSignMask = Double::kSignMask;
+const uint32_t kDoubleExponentShift = HeapNumber::kMantissaBits;
+const uint32_t kDoubleNaNShift = kDoubleExponentShift - 1;
+const uint64_t kDoubleNaNMask = Double::kExponentMask | (1L << kDoubleNaNShift);
+
+const uint32_t kSingleSignMask = kBinary32SignMask;
+const uint32_t kSingleExponentMask = kBinary32ExponentMask;
+const uint32_t kSingleExponentShift = kBinary32ExponentShift;
+const uint32_t kSingleNaNShift = kSingleExponentShift - 1;
+const uint32_t kSingleNaNMask = kSingleExponentMask | (1 << kSingleNaNShift);
+
 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
                                CodeObjectRequired create_code_object)
     : Assembler(arg_isolate, buffer, size),
@@ -29,7 +41,6 @@
   }
 }
 
-
 void MacroAssembler::Load(Register dst,
                           const MemOperand& src,
                           Representation r) {
@@ -1324,6 +1335,49 @@
 
 // ------------Pseudo-instructions-------------
 
+// Change endianness
+void MacroAssembler::ByteSwapSigned(Register reg, int operand_size) {
+  DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4 ||
+         operand_size == 8);
+  DCHECK(kArchVariant == kMips64r6 || kArchVariant == kMips64r2);
+  if (operand_size == 1) {
+    seb(reg, reg);
+    sll(reg, reg, 0);
+    dsbh(reg, reg);
+    dshd(reg, reg);
+  } else if (operand_size == 2) {
+    seh(reg, reg);
+    sll(reg, reg, 0);
+    dsbh(reg, reg);
+    dshd(reg, reg);
+  } else if (operand_size == 4) {
+    sll(reg, reg, 0);
+    dsbh(reg, reg);
+    dshd(reg, reg);
+  } else {
+    dsbh(reg, reg);
+    dshd(reg, reg);
+  }
+}
+
+void MacroAssembler::ByteSwapUnsigned(Register reg, int operand_size) {
+  DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
+  if (operand_size == 1) {
+    andi(reg, reg, 0xFF);
+    dsbh(reg, reg);
+    dshd(reg, reg);
+  } else if (operand_size == 2) {
+    andi(reg, reg, 0xFFFF);
+    dsbh(reg, reg);
+    dshd(reg, reg);
+  } else {
+    dsll32(reg, reg, 0);
+    dsrl32(reg, reg, 0);
+    dsbh(reg, reg);
+    dshd(reg, reg);
+  }
+}
+
 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
   DCHECK(!rd.is(at));
   DCHECK(!rs.rm().is(at));
@@ -4031,9 +4085,6 @@
   Label start;
   bind(&start);
   int64_t target_int = reinterpret_cast<int64_t>(target);
-  // Must record previous source positions before the
-  // li() generates a new code target.
-  positions_recorder()->WriteRecordedPositions();
   li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
   Call(t9, cond, rs, rt, bd);
   DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
@@ -4854,6 +4905,72 @@
   sdc1(double_result, MemOperand(scratch1, 0));
 }
 
+void MacroAssembler::SubNanPreservePayloadAndSign_s(FPURegister fd,
+                                                    FPURegister fs,
+                                                    FPURegister ft) {
+  FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
+  Label check_nan, save_payload, done;
+  Register scratch1 = t8;
+  Register scratch2 = t9;
+
+  sub_s(dest, fs, ft);
+  // Check if the result of subtraction is NaN.
+  BranchF32(nullptr, &check_nan, eq, fs, ft);
+  Branch(USE_DELAY_SLOT, &done);
+  dest.is(fd) ? nop() : mov_s(fd, dest);
+
+  bind(&check_nan);
+  // Check if first operand is a NaN.
+  mfc1(scratch1, fs);
+  BranchF32(nullptr, &save_payload, eq, fs, fs);
+  // Second operand must be a NaN.
+  mfc1(scratch1, ft);
+
+  bind(&save_payload);
+  // Reserve payload.
+  And(scratch1, scratch1,
+      Operand(kSingleSignMask | ((1 << kSingleNaNShift) - 1)));
+  mfc1(scratch2, dest);
+  And(scratch2, scratch2, Operand(kSingleNaNMask));
+  Or(scratch2, scratch2, scratch1);
+  mtc1(scratch2, fd);
+
+  bind(&done);
+}
+
+void MacroAssembler::SubNanPreservePayloadAndSign_d(FPURegister fd,
+                                                    FPURegister fs,
+                                                    FPURegister ft) {
+  FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
+  Label check_nan, save_payload, done;
+  Register scratch1 = t8;
+  Register scratch2 = t9;
+
+  sub_d(dest, fs, ft);
+  // Check if the result of subtraction is NaN.
+  BranchF64(nullptr, &check_nan, eq, fs, ft);
+  Branch(USE_DELAY_SLOT, &done);
+  dest.is(fd) ? nop() : mov_d(fd, dest);
+
+  bind(&check_nan);
+  // Check if first operand is a NaN.
+  dmfc1(scratch1, fs);
+  BranchF64(nullptr, &save_payload, eq, fs, fs);
+  // Second operand must be a NaN.
+  dmfc1(scratch1, ft);
+
+  bind(&save_payload);
+  // Reserve payload.
+  li(at, Operand(kDoubleSignMask | (1L << kDoubleNaNShift)));
+  Dsubu(at, at, Operand(1));
+  And(scratch1, scratch1, at);
+  dmfc1(scratch2, dest);
+  And(scratch2, scratch2, Operand(kDoubleNaNMask));
+  Or(scratch2, scratch2, scratch1);
+  dmtc1(scratch2, fd);
+
+  bind(&done);
+}
 
 void MacroAssembler::CompareMapAndBranch(Register obj,
                                          Register scratch,
@@ -5142,11 +5259,12 @@
                                              const ParameterCount& expected,
                                              const ParameterCount& actual) {
   Label skip_flooding;
-  ExternalReference step_in_enabled =
-      ExternalReference::debug_step_in_enabled_address(isolate());
-  li(t0, Operand(step_in_enabled));
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  li(t0, Operand(last_step_action));
   lb(t0, MemOperand(t0));
-  Branch(&skip_flooding, eq, t0, Operand(zero_reg));
+  Branch(&skip_flooding, lt, t0, Operand(StepIn));
   {
     FrameScope frame(this,
                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -6031,9 +6149,8 @@
 
 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
   ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  ld(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
-  ld(vector,
-     FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+  ld(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
+  ld(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
 }
 
 
@@ -7011,8 +7128,7 @@
   if (reg5.is_valid()) regs |= reg5.bit();
   if (reg6.is_valid()) regs |= reg6.bit();
 
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
     int code = config->GetAllocatableGeneralCode(i);
     Register candidate = Register::from_code(code);
diff --git a/src/mips64/macro-assembler-mips64.h b/src/mips64/macro-assembler-mips64.h
index 27a34b3..576d30a 100644
--- a/src/mips64/macro-assembler-mips64.h
+++ b/src/mips64/macro-assembler-mips64.h
@@ -722,6 +722,10 @@
   // ---------------------------------------------------------------------------
   // Pseudo-instructions.
 
+  // Change endianness
+  void ByteSwapSigned(Register reg, int operand_size);
+  void ByteSwapUnsigned(Register reg, int operand_size);
+
   void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
 
   void Ulh(Register rd, const MemOperand& rs);
@@ -929,6 +933,12 @@
   void Floor_w_d(FPURegister fd, FPURegister fs);
   void Ceil_w_d(FPURegister fd, FPURegister fs);
 
+  // Preserve value of a NaN operand
+  void SubNanPreservePayloadAndSign_s(FPURegister fd, FPURegister fs,
+                                      FPURegister ft);
+  void SubNanPreservePayloadAndSign_d(FPURegister fd, FPURegister fs,
+                                      FPURegister ft);
+
   void Madd_d(FPURegister fd,
               FPURegister fr,
               FPURegister fs,
diff --git a/src/mips64/simulator-mips64.cc b/src/mips64/simulator-mips64.cc
index 3157030..ed484ef 100644
--- a/src/mips64/simulator-mips64.cc
+++ b/src/mips64/simulator-mips64.cc
@@ -801,9 +801,7 @@
   last_debugger_input_ = input;
 }
 
-
-void Simulator::FlushICache(v8::internal::HashMap* i_cache,
-                            void* start_addr,
+void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
                             size_t size) {
   int64_t start = reinterpret_cast<int64_t>(start_addr);
   int64_t intra_line = (start & CachePage::kLineMask);
@@ -824,10 +822,8 @@
   }
 }
 
-
-CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
-  v8::internal::HashMap::Entry* entry =
-      i_cache->LookupOrInsert(page, ICacheHash(page));
+CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
+  base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
   if (entry->value == NULL) {
     CachePage* new_page = new CachePage();
     entry->value = new_page;
@@ -837,7 +833,7 @@
 
 
 // Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
+void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start,
                              size_t size) {
   DCHECK(size <= CachePage::kPageSize);
   DCHECK(AllOnOnePage(start, size - 1));
@@ -850,9 +846,7 @@
   memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
 }
 
-
-void Simulator::CheckICache(v8::internal::HashMap* i_cache,
-                            Instruction* instr) {
+void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
   int64_t address = reinterpret_cast<int64_t>(instr);
   void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
   void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -885,7 +879,7 @@
 Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
   i_cache_ = isolate_->simulator_i_cache();
   if (i_cache_ == NULL) {
-    i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+    i_cache_ = new base::HashMap(&ICacheMatch);
     isolate_->set_simulator_i_cache(i_cache_);
   }
   Initialize(isolate);
@@ -1000,10 +994,10 @@
 
 
 // static
-void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
   Redirection::DeleteChain(first);
   if (i_cache != nullptr) {
-    for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
+    for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
          entry = i_cache->Next(entry)) {
       delete static_cast<CachePage*>(entry->value);
     }
@@ -2077,15 +2071,17 @@
           case ExternalReference::BUILTIN_FP_FP_CALL:
           case ExternalReference::BUILTIN_COMPARE_CALL:
             PrintF("Call to host function at %p with args %f, %f",
-                   FUNCTION_ADDR(generic_target), dval0, dval1);
+                   static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0,
+                   dval1);
             break;
           case ExternalReference::BUILTIN_FP_CALL:
             PrintF("Call to host function at %p with arg %f",
-                FUNCTION_ADDR(generic_target), dval0);
+                   static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0);
             break;
           case ExternalReference::BUILTIN_FP_INT_CALL:
             PrintF("Call to host function at %p with args %f, %d",
-                   FUNCTION_ADDR(generic_target), dval0, ival);
+                   static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0,
+                   ival);
             break;
           default:
             UNREACHABLE();
@@ -2188,13 +2184,15 @@
             "Call to host triple returning runtime function %p "
             "args %016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", %016" PRIx64
             ", %016" PRIx64 "\n",
-            FUNCTION_ADDR(target), arg1, arg2, arg3, arg4, arg5);
+            static_cast<void*>(FUNCTION_ADDR(target)), arg1, arg2, arg3, arg4,
+            arg5);
       }
       // arg0 is a hidden argument pointing to the return location, so don't
       // pass it to the target function.
       ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
       if (::v8::internal::FLAG_trace_sim) {
-        PrintF("Returned { %p, %p, %p }\n", result.x, result.y, result.z);
+        PrintF("Returned { %p, %p, %p }\n", static_cast<void*>(result.x),
+               static_cast<void*>(result.y), static_cast<void*>(result.z));
       }
       // Return is passed back in address pointed to by hidden first argument.
       ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
@@ -2210,7 +2208,8 @@
             "Call to host function at %p "
             "args %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
             " , %08" PRIx64 " , %08" PRIx64 " \n",
-            FUNCTION_ADDR(target), arg0, arg1, arg2, arg3, arg4, arg5);
+            static_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2, arg3,
+            arg4, arg5);
       }
       // int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
       // set_register(v0, static_cast<int32_t>(result));
@@ -3978,12 +3977,57 @@
           alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
           break;
         }
-        case SEB:
-        case SEH:
-        case WSBH:
-          alu_out = 0x12345678;
-          UNREACHABLE();
+        case SEB: {
+          uint8_t input = static_cast<uint8_t>(rt());
+          uint32_t output = input;
+          uint32_t mask = 0x00000080;
+
+          // Extending sign
+          if (mask & input) {
+            output |= 0xFFFFFF00;
+          }
+
+          alu_out = static_cast<int32_t>(output);
           break;
+        }
+        case SEH: {
+          uint16_t input = static_cast<uint16_t>(rt());
+          uint32_t output = input;
+          uint32_t mask = 0x00008000;
+
+          // Extending sign
+          if (mask & input) {
+            output |= 0xFFFF0000;
+          }
+
+          alu_out = static_cast<int32_t>(output);
+          break;
+        }
+        case WSBH: {
+          uint32_t input = static_cast<uint32_t>(rt());
+          uint64_t output = 0;
+
+          uint32_t mask = 0xFF000000;
+          for (int i = 0; i < 4; i++) {
+            uint32_t tmp = mask & input;
+            if (i % 2 == 0) {
+              tmp = tmp >> 8;
+            } else {
+              tmp = tmp << 8;
+            }
+            output = output | tmp;
+            mask = mask >> 8;
+          }
+          mask = 0x80000000;
+
+          // Extending sign
+          if (mask & output) {
+            output |= 0xFFFFFFFF00000000;
+          }
+
+          alu_out = static_cast<int64_t>(output);
+          break;
+        }
         default: {
           const uint8_t bp2 = get_instr()->Bp2Value();
           sa >>= kBp2Bits;
@@ -4042,11 +4086,47 @@
           }
           break;
         }
-        case DSBH:
-        case DSHD:
-          alu_out = 0x12345678;
-          UNREACHABLE();
+        case DSBH: {
+          uint64_t input = static_cast<uint64_t>(rt());
+          uint64_t output = 0;
+
+          uint64_t mask = 0xFF00000000000000;
+          for (int i = 0; i < 8; i++) {
+            uint64_t tmp = mask & input;
+            if (i % 2 == 0)
+              tmp = tmp >> 8;
+            else
+              tmp = tmp << 8;
+
+            output = output | tmp;
+            mask = mask >> 8;
+          }
+
+          alu_out = static_cast<int64_t>(output);
           break;
+        }
+        case DSHD: {
+          uint64_t input = static_cast<uint64_t>(rt());
+          uint64_t output = 0;
+
+          uint64_t mask = 0xFFFF000000000000;
+          for (int i = 0; i < 4; i++) {
+            uint64_t tmp = mask & input;
+            if (i == 0)
+              tmp = tmp >> 48;
+            else if (i == 1)
+              tmp = tmp >> 16;
+            else if (i == 2)
+              tmp = tmp << 16;
+            else
+              tmp = tmp << 48;
+            output = output | tmp;
+            mask = mask >> 16;
+          }
+
+          alu_out = static_cast<int64_t>(output);
+          break;
+        }
         default: {
           const uint8_t bp3 = get_instr()->Bp3Value();
           sa >>= kBp3Bits;
@@ -4096,31 +4176,7 @@
       DecodeTypeRegisterSPECIAL2();
       break;
     case SPECIAL3:
-      switch (instr->FunctionFieldRaw()) {
-        case BSHFL: {
-          int32_t saVal = sa();
-          saVal >>= kBp2Bits;
-          switch (saVal) {
-            case ALIGN: {
-              DecodeTypeRegisterSPECIAL3();
-              break;
-            }
-          }
-        }
-        case DBSHFL: {
-          int32_t saVal = sa();
-          saVal >>= kBp2Bits;
-          switch (saVal) {
-            case DALIGN: {
-              DecodeTypeRegisterSPECIAL3();
-              break;
-            }
-          }
-        }
-        default:
-          DecodeTypeRegisterSPECIAL3();
-          break;
-      }
+      DecodeTypeRegisterSPECIAL3();
       break;
     // Unimplemented opcodes raised an error in the configuration step before,
     // so we can use the default here to set the destination register in common
diff --git a/src/mips64/simulator-mips64.h b/src/mips64/simulator-mips64.h
index 7f60a74..cd606e2 100644
--- a/src/mips64/simulator-mips64.h
+++ b/src/mips64/simulator-mips64.h
@@ -84,7 +84,7 @@
 // Running with a simulator.
 
 #include "src/assembler.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
 
 namespace v8 {
 namespace internal {
@@ -226,7 +226,7 @@
   // Call on program start.
   static void Initialize(Isolate* isolate);
 
-  static void TearDown(HashMap* i_cache, Redirection* first);
+  static void TearDown(base::HashMap* i_cache, Redirection* first);
 
   // V8 generally calls into generated JS code with 5 parameters and into
   // generated RegExp code with 7 parameters. This is a convenience function,
@@ -246,8 +246,7 @@
   char* last_debugger_input() { return last_debugger_input_; }
 
   // ICache checking.
-  static void FlushICache(v8::internal::HashMap* i_cache, void* start,
-                          size_t size);
+  static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
 
   // Returns true if pc register contains one of the 'special_values' defined
   // below (bad_ra, end_sim_pc).
@@ -415,10 +414,9 @@
   }
 
   // ICache.
-  static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
-  static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
-                           size_t size);
-  static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+  static void CheckICache(base::HashMap* i_cache, Instruction* instr);
+  static void FlushOnePage(base::HashMap* i_cache, intptr_t start, size_t size);
+  static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
 
   enum Exception {
     none,
@@ -463,7 +461,7 @@
   char* last_debugger_input_;
 
   // Icache simulation.
-  v8::internal::HashMap* i_cache_;
+  base::HashMap* i_cache_;
 
   v8::internal::Isolate* isolate_;
 
diff --git a/src/objects-body-descriptors-inl.h b/src/objects-body-descriptors-inl.h
index ee2e01e..cfa945d 100644
--- a/src/objects-body-descriptors-inl.h
+++ b/src/objects-body-descriptors-inl.h
@@ -456,6 +456,8 @@
     case TRANSITION_ARRAY_TYPE:
       return Op::template apply<TransitionArray::BodyDescriptor>(p1, p2, p3);
     case JS_OBJECT_TYPE:
+    case JS_ERROR_TYPE:
+    case JS_ARGUMENTS_TYPE:
     case JS_PROMISE_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index a8728fc..d3d13d4 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -99,6 +99,8 @@
       Oddball::cast(this)->OddballVerify();
       break;
     case JS_OBJECT_TYPE:
+    case JS_ERROR_TYPE:
+    case JS_ARGUMENTS_TYPE:
     case JS_API_OBJECT_TYPE:
     case JS_SPECIAL_API_OBJECT_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
@@ -211,7 +213,7 @@
   CHECK(IsSymbol());
   CHECK(HasHashCode());
   CHECK(Hash() > 0u);
-  CHECK(name()->IsUndefined() || name()->IsString());
+  CHECK(name()->IsUndefined(GetIsolate()) || name()->IsString());
 }
 
 
@@ -289,6 +291,7 @@
                actual_unused_property_fields - JSObject::kFieldsAdded);
     }
     DescriptorArray* descriptors = map()->instance_descriptors();
+    Isolate* isolate = GetIsolate();
     for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) {
       if (descriptors->GetDetails(i).type() == DATA) {
         Representation r = descriptors->GetDetails(i).representation();
@@ -299,7 +302,7 @@
         }
         Object* value = RawFastPropertyAt(index);
         if (r.IsDouble()) DCHECK(value->IsMutableHeapNumber());
-        if (value->IsUninitialized()) continue;
+        if (value->IsUninitialized(isolate)) continue;
         if (r.IsSmi()) DCHECK(value->IsSmi());
         if (r.IsHeapObject()) DCHECK(value->IsHeapObject());
         FieldType* field_type = descriptors->GetFieldType(i);
@@ -312,7 +315,7 @@
           // object literal creation and we will end up having and undefined
           // value that does not match the field type.
           CHECK(!field_type->NowStable() || field_type->NowContains(value) ||
-                (!FLAG_use_allocation_folding && value->IsUndefined()));
+                (!FLAG_use_allocation_folding && value->IsUndefined(isolate)));
         }
       }
     }
@@ -338,7 +341,7 @@
   CHECK(instance_size() == kVariableSizeSentinel ||
          (kPointerSize <= instance_size() &&
           instance_size() < heap->Capacity()));
-  CHECK(GetBackPointer()->IsUndefined() ||
+  CHECK(GetBackPointer()->IsUndefined(heap->isolate()) ||
         !Map::cast(GetBackPointer())->is_stable());
   VerifyHeapPointer(prototype());
   VerifyHeapPointer(instance_descriptors());
@@ -412,7 +415,7 @@
     VerifyPointer(e);
   }
   CHECK_LE(LengthFor(number_of_transitions()), length());
-  CHECK(next_link()->IsUndefined() || next_link()->IsSmi() ||
+  CHECK(next_link()->IsUndefined(GetIsolate()) || next_link()->IsSmi() ||
         next_link()->IsTransitionArray());
 }
 
@@ -432,7 +435,7 @@
 void JSModule::JSModuleVerify() {
   VerifyObjectField(kContextOffset);
   VerifyObjectField(kScopeInfoOffset);
-  CHECK(context()->IsUndefined() ||
+  CHECK(context()->IsUndefined(GetIsolate()) ||
         Context::cast(context())->IsModuleContext());
 }
 
@@ -449,16 +452,18 @@
   if (value()->IsHeapObject()) {
     VerifyHeapPointer(value());
   }
-  CHECK(value()->IsUndefined() || value()->IsSmi() || value()->IsHeapNumber());
-  CHECK(year()->IsUndefined() || year()->IsSmi() || year()->IsNaN());
-  CHECK(month()->IsUndefined() || month()->IsSmi() || month()->IsNaN());
-  CHECK(day()->IsUndefined() || day()->IsSmi() || day()->IsNaN());
-  CHECK(weekday()->IsUndefined() || weekday()->IsSmi() || weekday()->IsNaN());
-  CHECK(hour()->IsUndefined() || hour()->IsSmi() || hour()->IsNaN());
-  CHECK(min()->IsUndefined() || min()->IsSmi() || min()->IsNaN());
-  CHECK(sec()->IsUndefined() || sec()->IsSmi() || sec()->IsNaN());
-  CHECK(cache_stamp()->IsUndefined() ||
-        cache_stamp()->IsSmi() ||
+  Isolate* isolate = GetIsolate();
+  CHECK(value()->IsUndefined(isolate) || value()->IsSmi() ||
+        value()->IsHeapNumber());
+  CHECK(year()->IsUndefined(isolate) || year()->IsSmi() || year()->IsNaN());
+  CHECK(month()->IsUndefined(isolate) || month()->IsSmi() || month()->IsNaN());
+  CHECK(day()->IsUndefined(isolate) || day()->IsSmi() || day()->IsNaN());
+  CHECK(weekday()->IsUndefined(isolate) || weekday()->IsSmi() ||
+        weekday()->IsNaN());
+  CHECK(hour()->IsUndefined(isolate) || hour()->IsSmi() || hour()->IsNaN());
+  CHECK(min()->IsUndefined(isolate) || min()->IsSmi() || min()->IsNaN());
+  CHECK(sec()->IsUndefined(isolate) || sec()->IsSmi() || sec()->IsNaN());
+  CHECK(cache_stamp()->IsUndefined(isolate) || cache_stamp()->IsSmi() ||
         cache_stamp()->IsNaN());
 
   if (month()->IsSmi()) {
@@ -487,7 +492,7 @@
   }
   if (cache_stamp()->IsSmi()) {
     CHECK(Smi::cast(cache_stamp())->value() <=
-          Smi::cast(GetIsolate()->date_cache()->stamp())->value());
+          Smi::cast(isolate->date_cache()->stamp())->value());
   }
 }
 
@@ -555,7 +560,7 @@
   VerifyObjectField(kNextFunctionLinkOffset);
   CHECK(code()->IsCode());
   CHECK(next_function_link() == NULL ||
-        next_function_link()->IsUndefined() ||
+        next_function_link()->IsUndefined(GetIsolate()) ||
         next_function_link()->IsJSFunction());
   CHECK(map()->is_callable());
 }
@@ -566,14 +571,17 @@
   VerifyObjectField(kNameOffset);
   VerifyObjectField(kCodeOffset);
   VerifyObjectField(kOptimizedCodeMapOffset);
-  VerifyObjectField(kFeedbackVectorOffset);
+  VerifyObjectField(kFeedbackMetadataOffset);
   VerifyObjectField(kScopeInfoOffset);
   VerifyObjectField(kInstanceClassNameOffset);
-  CHECK(function_data()->IsUndefined() || IsApiFunction() ||
-        HasBuiltinFunctionId() || HasBytecodeArray());
+  CHECK(function_data()->IsUndefined(GetIsolate()) || IsApiFunction() ||
+        HasBytecodeArray());
   VerifyObjectField(kFunctionDataOffset);
   VerifyObjectField(kScriptOffset);
   VerifyObjectField(kDebugInfoOffset);
+  CHECK(function_identifier()->IsUndefined(GetIsolate()) ||
+        HasBuiltinFunctionId() || HasInferredName());
+  VerifyObjectField(kFunctionIdentifierOffset);
 }
 
 
@@ -708,12 +716,12 @@
 
 void JSArray::JSArrayVerify() {
   JSObjectVerify();
-  CHECK(length()->IsNumber() || length()->IsUndefined());
+  Isolate* isolate = GetIsolate();
+  CHECK(length()->IsNumber() || length()->IsUndefined(isolate));
   // If a GC was caused while constructing this array, the elements
   // pointer may point to a one pointer filler map.
   if (ElementsAreSafeToExamine()) {
-    CHECK(elements()->IsUndefined() ||
-          elements()->IsFixedArray() ||
+    CHECK(elements()->IsUndefined(isolate) || elements()->IsFixedArray() ||
           elements()->IsFixedDoubleArray());
   }
 }
@@ -723,7 +731,7 @@
   CHECK(IsJSSet());
   JSObjectVerify();
   VerifyHeapPointer(table());
-  CHECK(table()->IsOrderedHashTable() || table()->IsUndefined());
+  CHECK(table()->IsOrderedHashTable() || table()->IsUndefined(GetIsolate()));
   // TODO(arv): Verify OrderedHashTable too.
 }
 
@@ -732,7 +740,7 @@
   CHECK(IsJSMap());
   JSObjectVerify();
   VerifyHeapPointer(table());
-  CHECK(table()->IsOrderedHashTable() || table()->IsUndefined());
+  CHECK(table()->IsOrderedHashTable() || table()->IsUndefined(GetIsolate()));
   // TODO(arv): Verify OrderedHashTable too.
 }
 
@@ -741,9 +749,10 @@
   CHECK(IsJSSetIterator());
   JSObjectVerify();
   VerifyHeapPointer(table());
-  CHECK(table()->IsOrderedHashTable() || table()->IsUndefined());
-  CHECK(index()->IsSmi() || index()->IsUndefined());
-  CHECK(kind()->IsSmi() || kind()->IsUndefined());
+  Isolate* isolate = GetIsolate();
+  CHECK(table()->IsOrderedHashTable() || table()->IsUndefined(isolate));
+  CHECK(index()->IsSmi() || index()->IsUndefined(isolate));
+  CHECK(kind()->IsSmi() || kind()->IsUndefined(isolate));
 }
 
 
@@ -751,9 +760,10 @@
   CHECK(IsJSMapIterator());
   JSObjectVerify();
   VerifyHeapPointer(table());
-  CHECK(table()->IsOrderedHashTable() || table()->IsUndefined());
-  CHECK(index()->IsSmi() || index()->IsUndefined());
-  CHECK(kind()->IsSmi() || kind()->IsUndefined());
+  Isolate* isolate = GetIsolate();
+  CHECK(table()->IsOrderedHashTable() || table()->IsUndefined(isolate));
+  CHECK(index()->IsSmi() || index()->IsUndefined(isolate));
+  CHECK(kind()->IsSmi() || kind()->IsUndefined(isolate));
 }
 
 
@@ -761,7 +771,7 @@
   CHECK(IsJSWeakMap());
   JSObjectVerify();
   VerifyHeapPointer(table());
-  CHECK(table()->IsHashTable() || table()->IsUndefined());
+  CHECK(table()->IsHashTable() || table()->IsUndefined(GetIsolate()));
 }
 
 
@@ -769,13 +779,14 @@
   CHECK(IsJSWeakSet());
   JSObjectVerify();
   VerifyHeapPointer(table());
-  CHECK(table()->IsHashTable() || table()->IsUndefined());
+  CHECK(table()->IsHashTable() || table()->IsUndefined(GetIsolate()));
 }
 
 
 void JSRegExp::JSRegExpVerify() {
   JSObjectVerify();
-  CHECK(data()->IsUndefined() || data()->IsFixedArray());
+  Isolate* isolate = GetIsolate();
+  CHECK(data()->IsUndefined(isolate) || data()->IsFixedArray());
   switch (TypeTag()) {
     case JSRegExp::ATOM: {
       FixedArray* arr = FixedArray::cast(data());
@@ -811,7 +822,7 @@
     }
     default:
       CHECK_EQ(JSRegExp::NOT_COMPILED, TypeTag());
-      CHECK(data()->IsUndefined());
+      CHECK(data()->IsUndefined(isolate));
       break;
   }
 }
@@ -821,10 +832,11 @@
   CHECK(IsJSProxy());
   VerifyPointer(target());
   VerifyPointer(handler());
+  Isolate* isolate = GetIsolate();
   CHECK_EQ(target()->IsCallable(), map()->is_callable());
   CHECK_EQ(target()->IsConstructor(), map()->is_constructor());
-  CHECK(hash()->IsSmi() || hash()->IsUndefined());
-  CHECK(map()->prototype()->IsNull());
+  CHECK(hash()->IsSmi() || hash()->IsUndefined(isolate));
+  CHECK(map()->prototype()->IsNull(isolate));
   // There should be no properties on a Proxy.
   CHECK_EQ(0, map()->NumberOfOwnDescriptors());
 }
@@ -834,8 +846,8 @@
   CHECK(IsJSArrayBuffer());
   JSObjectVerify();
   VerifyPointer(byte_length());
-  CHECK(byte_length()->IsSmi() || byte_length()->IsHeapNumber()
-        || byte_length()->IsUndefined());
+  CHECK(byte_length()->IsSmi() || byte_length()->IsHeapNumber() ||
+        byte_length()->IsUndefined(GetIsolate()));
 }
 
 
@@ -843,16 +855,17 @@
   CHECK(IsJSArrayBufferView());
   JSObjectVerify();
   VerifyPointer(buffer());
-  CHECK(buffer()->IsJSArrayBuffer() || buffer()->IsUndefined()
-        || buffer() == Smi::FromInt(0));
+  Isolate* isolate = GetIsolate();
+  CHECK(buffer()->IsJSArrayBuffer() || buffer()->IsUndefined(isolate) ||
+        buffer() == Smi::FromInt(0));
 
   VerifyPointer(raw_byte_offset());
   CHECK(raw_byte_offset()->IsSmi() || raw_byte_offset()->IsHeapNumber() ||
-        raw_byte_offset()->IsUndefined());
+        raw_byte_offset()->IsUndefined(isolate));
 
   VerifyPointer(raw_byte_length());
   CHECK(raw_byte_length()->IsSmi() || raw_byte_length()->IsHeapNumber() ||
-        raw_byte_length()->IsUndefined());
+        raw_byte_length()->IsUndefined(isolate));
 }
 
 
@@ -861,7 +874,7 @@
   JSArrayBufferViewVerify();
   VerifyPointer(raw_length());
   CHECK(raw_length()->IsSmi() || raw_length()->IsHeapNumber() ||
-        raw_length()->IsUndefined());
+        raw_length()->IsUndefined(GetIsolate()));
 
   VerifyPointer(elements());
 }
@@ -923,9 +936,9 @@
 
 void AccessCheckInfo::AccessCheckInfoVerify() {
   CHECK(IsAccessCheckInfo());
-  VerifyPointer(named_callback());
-  VerifyPointer(indexed_callback());
   VerifyPointer(callback());
+  VerifyPointer(named_interceptor());
+  VerifyPointer(indexed_interceptor());
   VerifyPointer(data());
 }
 
@@ -1003,12 +1016,13 @@
 void NormalizedMapCache::NormalizedMapCacheVerify() {
   FixedArray::cast(this)->FixedArrayVerify();
   if (FLAG_enable_slow_asserts) {
+    Isolate* isolate = GetIsolate();
     for (int i = 0; i < length(); i++) {
       Object* e = FixedArray::get(i);
       if (e->IsMap()) {
         Map::cast(e)->DictionaryMapVerify();
       } else {
-        CHECK(e->IsUndefined());
+        CHECK(e->IsUndefined(isolate));
       }
     }
   }
@@ -1062,9 +1076,9 @@
       int holes = 0;
       FixedArray* e = FixedArray::cast(elements());
       int len = e->length();
-      Heap* heap = GetHeap();
+      Isolate* isolate = GetIsolate();
       for (int i = 0; i < len; i++) {
-        if (e->get(i) == heap->the_hole_value()) holes++;
+        if (e->get(i)->IsTheHole(isolate)) holes++;
       }
       info->number_of_fast_used_elements_   += len - holes;
       info->number_of_fast_unused_elements_ += holes;
diff --git a/src/objects-inl.h b/src/objects-inl.h
index a64d9ff..e1df2b6 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -14,6 +14,7 @@
 
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
+#include "src/builtins.h"
 #include "src/contexts-inl.h"
 #include "src/conversions-inl.h"
 #include "src/factory.h"
@@ -22,8 +23,9 @@
 #include "src/handles-inl.h"
 #include "src/heap/heap-inl.h"
 #include "src/heap/heap.h"
-#include "src/isolate.h"
 #include "src/isolate-inl.h"
+#include "src/isolate.h"
+#include "src/keys.h"
 #include "src/layout-descriptor-inl.h"
 #include "src/lookup.h"
 #include "src/objects.h"
@@ -159,6 +161,15 @@
     return IsHeapObject() && HeapObject::cast(this)->Is##type_(); \
   }
 HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DEF)
+#undef IS_TYPE_FUNCTION_DEF
+
+#define IS_TYPE_FUNCTION_DEF(Type, Value)             \
+  bool Object::Is##Type(Isolate* isolate) const {     \
+    return this == isolate->heap()->Value();          \
+  }                                                   \
+  bool HeapObject::Is##Type(Isolate* isolate) const { \
+    return this == isolate->heap()->Value();          \
+  }
 ODDBALL_LIST(IS_TYPE_FUNCTION_DEF)
 #undef IS_TYPE_FUNCTION_DEF
 
@@ -244,7 +255,6 @@
          String::cast(this)->IsTwoByteRepresentation();
 }
 
-
 bool Object::HasValidElements() {
   // Dictionary is covered under FixedArray.
   return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
@@ -283,12 +293,12 @@
 Handle<Object> Object::NewStorageFor(Isolate* isolate,
                                      Handle<Object> object,
                                      Representation representation) {
-  if (representation.IsSmi() && object->IsUninitialized()) {
+  if (representation.IsSmi() && object->IsUninitialized(isolate)) {
     return handle(Smi::FromInt(0), isolate);
   }
   if (!representation.IsDouble()) return object;
   double value;
-  if (object->IsUninitialized()) {
+  if (object->IsUninitialized(isolate)) {
     value = 0;
   } else if (object->IsMutableHeapNumber()) {
     value = HeapNumber::cast(*object)->value();
@@ -302,7 +312,7 @@
 Handle<Object> Object::WrapForRead(Isolate* isolate,
                                    Handle<Object> object,
                                    Representation representation) {
-  DCHECK(!object->IsUninitialized());
+  DCHECK(!object->IsUninitialized(isolate));
   if (!representation.IsDouble()) {
     DCHECK(object->FitsRepresentation(representation));
     return object;
@@ -776,11 +786,13 @@
 TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
 TYPE_CHECKER(WeakCell, WEAK_CELL_TYPE)
 TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
-TYPE_CHECKER(JSGeneratorObject, JS_GENERATOR_OBJECT_TYPE)
-TYPE_CHECKER(JSModule, JS_MODULE_TYPE)
-TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
 TYPE_CHECKER(JSDate, JS_DATE_TYPE)
+TYPE_CHECKER(JSError, JS_ERROR_TYPE)
+TYPE_CHECKER(JSGeneratorObject, JS_GENERATOR_OBJECT_TYPE)
 TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
+TYPE_CHECKER(JSModule, JS_MODULE_TYPE)
+TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
+TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
 
 bool HeapObject::IsAbstractCode() const {
   return IsBytecodeArray() || IsCode();
@@ -937,13 +949,6 @@
 STRUCT_LIST(MAKE_STRUCT_PREDICATE)
 #undef MAKE_STRUCT_PREDICATE
 
-#define MAKE_ODDBALL_PREDICATE(Name)                                       \
-  bool HeapObject::Is##Name() const {                                      \
-    return IsOddball() && Oddball::cast(this)->kind() == Oddball::k##Name; \
-  }
-ODDBALL_LIST(MAKE_ODDBALL_PREDICATE)
-
-#undef MAKE_ODDBALL_PREDICATE
 double Object::Number() const {
   DCHECK(IsNumber());
   return IsSmi()
@@ -969,7 +974,8 @@
     return Representation::Smi();
   } else if (FLAG_track_double_fields && IsHeapNumber()) {
     return Representation::Double();
-  } else if (FLAG_track_computed_fields && IsUninitialized()) {
+  } else if (FLAG_track_computed_fields &&
+             IsUninitialized(HeapObject::cast(this)->GetIsolate())) {
     return Representation::None();
   } else if (FLAG_track_heap_object_fields) {
     DCHECK(IsHeapObject());
@@ -1095,8 +1101,7 @@
                                              Handle<JSReceiver> receiver) {
   // We don't expect access checks to be needed on JSProxy objects.
   DCHECK(!receiver->IsAccessCheckNeeded() || receiver->IsJSObject());
-  PrototypeIterator iter(isolate, receiver,
-                         PrototypeIterator::START_AT_RECEIVER,
+  PrototypeIterator iter(isolate, receiver, kStartAtReceiver,
                          PrototypeIterator::END_AT_NON_HIDDEN);
   do {
     if (!iter.AdvanceFollowingProxies()) return MaybeHandle<Object>();
@@ -1111,6 +1116,13 @@
   return GetProperty(receiver, str);
 }
 
+// static
+MUST_USE_RESULT MaybeHandle<FixedArray> JSReceiver::OwnPropertyKeys(
+    Handle<JSReceiver> object) {
+  return KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly,
+                                 ALL_PROPERTIES,
+                                 GetKeysConversion::kConvertToString);
+}
 
 #define FIELD_ADDR(p, offset) \
   (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
@@ -1742,7 +1754,7 @@
     PrintIsolate(GetIsolate(),
                  "pretenuring: AllocationSite(%p): (created, found, ratio) "
                  "(%d, %d, %f) %s => %s\n",
-                 this, create_count, found_count, ratio,
+                 static_cast<void*>(this), create_count, found_count, ratio,
                  PretenureDecisionName(current_decision),
                  PretenureDecisionName(pretenure_decision()));
   }
@@ -1793,8 +1805,7 @@
     DCHECK(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
     bool is_holey = IsFastHoleyElementsKind(current_kind);
     if (current_kind == FAST_HOLEY_ELEMENTS) return;
-    Heap* heap = object->GetHeap();
-    Object* the_hole = heap->the_hole_value();
+    Object* the_hole = object->GetHeap()->the_hole_value();
     for (uint32_t i = 0; i < count; ++i) {
       Object* current = *objects++;
       if (current == the_hole) {
@@ -2000,9 +2011,7 @@
   set_next(the_hole_value, SKIP_WRITE_BARRIER);
 }
 
-
-bool WeakCell::next_cleared() { return next()->IsTheHole(); }
-
+bool WeakCell::next_cleared() { return next()->IsTheHole(GetIsolate()); }
 
 int JSObject::GetHeaderSize() { return GetHeaderSize(map()->instance_type()); }
 
@@ -2060,6 +2069,10 @@
       return JSObject::kHeaderSize;
     case JS_MESSAGE_OBJECT_TYPE:
       return JSMessageObject::kSize;
+    case JS_ARGUMENTS_TYPE:
+      return JSArgumentsObject::kHeaderSize;
+    case JS_ERROR_TYPE:
+      return JSObject::kHeaderSize;
     default:
       UNREACHABLE();
       return 0;
@@ -2179,7 +2192,9 @@
   FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
   if (details.representation().IsDouble()) {
     // Nothing more to be done.
-    if (value->IsUninitialized()) return;
+    if (value->IsUninitialized(this->GetIsolate())) {
+      return;
+    }
     if (IsUnboxedDoubleField(index)) {
       RawFastDoublePropertyAtPut(index, value->Number());
     } else {
@@ -2272,9 +2287,12 @@
 
 void Object::VerifyApiCallResultType() {
 #if DEBUG
-  if (!(IsSmi() || IsString() || IsSymbol() || IsJSReceiver() ||
-        IsHeapNumber() || IsSimd128Value() || IsUndefined() || IsTrue() ||
-        IsFalse() || IsNull())) {
+  if (IsSmi()) return;
+  DCHECK(IsHeapObject());
+  Isolate* isolate = HeapObject::cast(this)->GetIsolate();
+  if (!(IsString() || IsSymbol() || IsJSReceiver() || IsHeapNumber() ||
+        IsSimd128Value() || IsUndefined(isolate) || IsTrue(isolate) ||
+        IsFalse(isolate) || IsNull(isolate))) {
     FATAL("API call returned invalid object");
   }
 #endif  // DEBUG
@@ -2457,7 +2475,7 @@
 
 
 void ArrayList::Clear(int index, Object* undefined) {
-  DCHECK(undefined->IsUndefined());
+  DCHECK(undefined->IsUndefined(GetIsolate()));
   FixedArray::cast(this)
       ->set(kFirstIndex + index, undefined, SKIP_WRITE_BARRIER);
 }
@@ -2789,18 +2807,18 @@
 
 
 FixedArrayBase* Map::GetInitialElements() {
+  FixedArrayBase* result = nullptr;
   if (has_fast_elements() || has_fast_string_wrapper_elements()) {
-    DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
-    return GetHeap()->empty_fixed_array();
+    result = GetHeap()->empty_fixed_array();
+  } else if (has_fast_sloppy_arguments_elements()) {
+    result = GetHeap()->empty_sloppy_arguments_elements();
   } else if (has_fixed_typed_array_elements()) {
-    FixedTypedArrayBase* empty_array =
-        GetHeap()->EmptyFixedTypedArrayForMap(this);
-    DCHECK(!GetHeap()->InNewSpace(empty_array));
-    return empty_array;
+    result = GetHeap()->EmptyFixedTypedArrayForMap(this);
   } else {
     UNREACHABLE();
   }
-  return NULL;
+  DCHECK(!GetHeap()->InNewSpace(result));
+  return result;
 }
 
 // static
@@ -3027,12 +3045,14 @@
   return Max(capacity, kMinCapacity);
 }
 
-bool HashTableBase::IsKey(Heap* heap, Object* k) {
+bool HashTableBase::IsKey(Isolate* isolate, Object* k) {
+  Heap* heap = isolate->heap();
   return k != heap->the_hole_value() && k != heap->undefined_value();
 }
 
 bool HashTableBase::IsKey(Object* k) {
-  return !k->IsTheHole() && !k->IsUndefined();
+  Isolate* isolate = this->GetIsolate();
+  return !k->IsTheHole(isolate) && !k->IsUndefined(isolate);
 }
 
 
@@ -3327,11 +3347,19 @@
 
 
 TypeFeedbackVector* LiteralsArray::feedback_vector() const {
+  if (length() == 0) {
+    return TypeFeedbackVector::cast(
+        const_cast<FixedArray*>(FixedArray::cast(this)));
+  }
   return TypeFeedbackVector::cast(get(kVectorIndex));
 }
 
 
 void LiteralsArray::set_feedback_vector(TypeFeedbackVector* vector) {
+  if (length() <= kVectorIndex) {
+    DCHECK(vector->length() == 0);
+    return;
+  }
   set(kVectorIndex, vector);
 }
 
@@ -3345,6 +3373,9 @@
   set(kFirstLiteralIndex + literal_index, literal);
 }
 
+void LiteralsArray::set_literal_undefined(int literal_index) {
+  set_undefined(kFirstLiteralIndex + literal_index);
+}
 
 int LiteralsArray::literals_count() const {
   return length() - kFirstLiteralIndex;
@@ -4039,6 +4070,13 @@
 
 int BytecodeArray::BytecodeArraySize() { return SizeFor(this->length()); }
 
+int BytecodeArray::SizeIncludingMetadata() {
+  int size = BytecodeArraySize();
+  size += constant_pool()->Size();
+  size += handler_table()->Size();
+  size += source_position_table()->Size();
+  return size;
+}
 
 ACCESSORS(FixedTypedArrayBase, base_pointer, Object, kBasePointerOffset)
 
@@ -4214,7 +4252,7 @@
   } else {
     // Clamp undefined to the default value. All other types have been
     // converted to a number type further up in the call chain.
-    DCHECK(value->IsUndefined());
+    DCHECK(value->IsUndefined(GetIsolate()));
   }
   set(index, cast_value);
 }
@@ -4531,6 +4569,10 @@
   return IsPrototypeMapBits::decode(bit_field2());
 }
 
+bool Map::should_be_fast_prototype_map() const {
+  if (!prototype_info()->IsPrototypeInfo()) return false;
+  return PrototypeInfo::cast(prototype_info())->should_be_fast_map();
+}
 
 void Map::set_elements_kind(ElementsKind elements_kind) {
   DCHECK(static_cast<int>(elements_kind) < kElementsKindCount);
@@ -4567,6 +4609,10 @@
   return IsSloppyArgumentsElements(elements_kind());
 }
 
+bool Map::has_fast_sloppy_arguments_elements() {
+  return elements_kind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
+}
+
 bool Map::has_fast_string_wrapper_elements() {
   return elements_kind() == FAST_STRING_WRAPPER_ELEMENTS;
 }
@@ -4666,7 +4712,7 @@
 bool Map::has_code_cache() {
   // Code caches are always fixed arrays. The empty fixed array is used as a
   // sentinel for an absent code cache.
-  return FixedArray::cast(code_cache())->length() != 0;
+  return code_cache()->length() != 0;
 }
 
 
@@ -4798,35 +4844,21 @@
   return ExtractKindFromFlags(flags());
 }
 
-
 bool Code::IsCodeStubOrIC() {
-  return kind() == STUB || kind() == HANDLER || kind() == LOAD_IC ||
-         kind() == KEYED_LOAD_IC || kind() == CALL_IC || kind() == STORE_IC ||
-         kind() == KEYED_STORE_IC || kind() == BINARY_OP_IC ||
-         kind() == COMPARE_IC || kind() == TO_BOOLEAN_IC;
+  switch (kind()) {
+    case STUB:
+    case HANDLER:
+#define CASE_KIND(kind) case kind:
+      IC_KIND_LIST(CASE_KIND)
+#undef CASE_KIND
+      return true;
+    default:
+      return false;
+  }
 }
 
-
-bool Code::IsJavaScriptCode() {
-  return kind() == FUNCTION || kind() == OPTIMIZED_FUNCTION ||
-         is_interpreter_entry_trampoline();
-}
-
-
-InlineCacheState Code::ic_state() {
-  InlineCacheState result = ExtractICStateFromFlags(flags());
-  // Only allow uninitialized or debugger states for non-IC code
-  // objects. This is used in the debugger to determine whether or not
-  // a call to code object has been replaced with a debug break call.
-  DCHECK(is_inline_cache_stub() ||
-         result == UNINITIALIZED ||
-         result == DEBUG_STUB);
-  return result;
-}
-
-
 ExtraICState Code::extra_ic_state() {
-  DCHECK(is_inline_cache_stub() || ic_state() == DEBUG_STUB);
+  DCHECK(is_inline_cache_stub() || is_debug_stub());
   return ExtractExtraICStateFromFlags(flags());
 }
 
@@ -4852,18 +4884,21 @@
   return is_crankshafted() && kind() != OPTIMIZED_FUNCTION;
 }
 
-
-inline bool Code::is_interpreter_entry_trampoline() {
-  Handle<Code> interpreter_entry =
-      GetIsolate()->builtins()->InterpreterEntryTrampoline();
-  return interpreter_entry.location() != nullptr && *interpreter_entry == this;
+inline bool Code::is_interpreter_trampoline_builtin() {
+  Builtins* builtins = GetIsolate()->builtins();
+  return this == *builtins->InterpreterEntryTrampoline() ||
+         this == *builtins->InterpreterEnterBytecodeDispatch() ||
+         this == *builtins->InterpreterMarkBaselineOnReturn();
 }
 
-inline bool Code::is_interpreter_enter_bytecode_dispatch() {
-  Handle<Code> interpreter_handler =
-      GetIsolate()->builtins()->InterpreterEnterBytecodeDispatch();
-  return interpreter_handler.location() != nullptr &&
-         *interpreter_handler == this;
+inline bool Code::has_unwinding_info() const {
+  return HasUnwindingInfoField::decode(READ_UINT32_FIELD(this, kFlagsOffset));
+}
+
+inline void Code::set_has_unwinding_info(bool state) {
+  uint32_t previous = READ_UINT32_FIELD(this, kFlagsOffset);
+  uint32_t updated_value = HasUnwindingInfoField::update(previous, state);
+  WRITE_UINT32_FIELD(this, kFlagsOffset, updated_value);
 }
 
 inline void Code::set_is_crankshafted(bool value) {
@@ -5069,7 +5104,18 @@
   }
 }
 
-bool Code::is_debug_stub() { return ic_state() == DEBUG_STUB; }
+bool Code::is_debug_stub() {
+  if (kind() != BUILTIN) return false;
+  switch (builtin_index()) {
+#define CASE_DEBUG_BUILTIN(name, kind, extra) case Builtins::k##name:
+    BUILTIN_LIST_DEBUG_A(CASE_DEBUG_BUILTIN)
+#undef CASE_DEBUG_BUILTIN
+      return true;
+    default:
+      return false;
+  }
+  return false;
+}
 bool Code::is_handler() { return kind() == HANDLER; }
 bool Code::is_call_stub() { return kind() == CALL_IC; }
 bool Code::is_binary_op_stub() { return kind() == BINARY_OP_IC; }
@@ -5078,14 +5124,6 @@
 bool Code::is_optimized_code() { return kind() == OPTIMIZED_FUNCTION; }
 bool Code::is_wasm_code() { return kind() == WASM_FUNCTION; }
 
-bool Code::embeds_maps_weakly() {
-  Kind k = kind();
-  return (k == LOAD_IC || k == STORE_IC || k == KEYED_LOAD_IC ||
-          k == KEYED_STORE_IC) &&
-         ic_state() == MONOMORPHIC;
-}
-
-
 Address Code::constant_pool() {
   Address constant_pool = NULL;
   if (FLAG_enable_embedded_constant_pool) {
@@ -5097,25 +5135,20 @@
   return constant_pool;
 }
 
-Code::Flags Code::ComputeFlags(Kind kind, InlineCacheState ic_state,
-                               ExtraICState extra_ic_state,
+Code::Flags Code::ComputeFlags(Kind kind, ExtraICState extra_ic_state,
                                CacheHolderFlag holder) {
+  // TODO(ishell): remove ICStateField.
   // Compute the bit mask.
-  unsigned int bits = KindField::encode(kind) | ICStateField::encode(ic_state) |
+  unsigned int bits = KindField::encode(kind) |
+                      ICStateField::encode(MONOMORPHIC) |
                       ExtraICStateField::encode(extra_ic_state) |
                       CacheHolderField::encode(holder);
   return static_cast<Flags>(bits);
 }
 
-Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
-                                          ExtraICState extra_ic_state,
-                                          CacheHolderFlag holder) {
-  return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, holder);
-}
-
 Code::Flags Code::ComputeHandlerFlags(Kind handler_kind,
                                       CacheHolderFlag holder) {
-  return ComputeFlags(Code::HANDLER, MONOMORPHIC, handler_kind, holder);
+  return ComputeFlags(Code::HANDLER, handler_kind, holder);
 }
 
 
@@ -5124,11 +5157,6 @@
 }
 
 
-InlineCacheState Code::ExtractICStateFromFlags(Flags flags) {
-  return ICStateField::decode(flags);
-}
-
-
 ExtraICState Code::ExtractExtraICStateFromFlags(Flags flags) {
   return ExtraICStateField::decode(flags);
 }
@@ -5217,6 +5245,13 @@
   }
 }
 
+int AbstractCode::SizeIncludingMetadata() {
+  if (IsCode()) {
+    return GetCode()->SizeIncludingMetadata();
+  } else {
+    return GetBytecodeArray()->SizeIncludingMetadata();
+  }
+}
 int AbstractCode::ExecutableSize() {
   if (IsCode()) {
     return GetCode()->ExecutableSize();
@@ -5268,7 +5303,7 @@
 
 
 void Map::set_prototype(Object* value, WriteBarrierMode mode) {
-  DCHECK(value->IsNull() || value->IsJSReceiver());
+  DCHECK(value->IsNull(GetIsolate()) || value->IsJSReceiver());
   WRITE_FIELD(this, kPrototypeOffset, value);
   CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode);
 }
@@ -5401,14 +5436,14 @@
 
 void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
   DCHECK(instance_type() >= FIRST_JS_RECEIVER_TYPE);
-  DCHECK((value->IsMap() && GetBackPointer()->IsUndefined()));
+  DCHECK(value->IsMap());
+  DCHECK(GetBackPointer()->IsUndefined(GetIsolate()));
   DCHECK(!value->IsMap() ||
          Map::cast(value)->GetConstructor() == constructor_or_backpointer());
   set_constructor_or_backpointer(value, mode);
 }
 
-
-ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
+ACCESSORS(Map, code_cache, FixedArray, kCodeCacheOffset)
 ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset)
 ACCESSORS(Map, weak_cell_cache, Object, kWeakCellCacheOffset)
 ACCESSORS(Map, constructor_or_backpointer, Object,
@@ -5466,9 +5501,32 @@
 
 ACCESSORS(Box, value, Object, kValueOffset)
 
+Map* PrototypeInfo::ObjectCreateMap() {
+  return Map::cast(WeakCell::cast(object_create_map())->value());
+}
+
+// static
+void PrototypeInfo::SetObjectCreateMap(Handle<PrototypeInfo> info,
+                                       Handle<Map> map) {
+  Handle<WeakCell> cell = Map::WeakCellForMap(map);
+  info->set_object_create_map(*cell);
+}
+
+bool PrototypeInfo::HasObjectCreateMap() {
+  Object* cache = object_create_map();
+  return cache->IsWeakCell() && !WeakCell::cast(cache)->cleared();
+}
+
+bool FunctionTemplateInfo::instantiated() {
+  return shared_function_info()->IsSharedFunctionInfo();
+}
+
 ACCESSORS(PrototypeInfo, prototype_users, Object, kPrototypeUsersOffset)
+ACCESSORS(PrototypeInfo, object_create_map, Object, kObjectCreateMap)
 SMI_ACCESSORS(PrototypeInfo, registry_slot, kRegistrySlotOffset)
 ACCESSORS(PrototypeInfo, validity_cell, Object, kValidityCellOffset)
+SMI_ACCESSORS(PrototypeInfo, bit_field, kBitFieldOffset)
+BOOL_ACCESSORS(PrototypeInfo, bit_field, should_be_fast_map, kShouldBeFastBit)
 
 ACCESSORS(SloppyBlockWithEvalContextExtension, scope_info, ScopeInfo,
           kScopeInfoOffset)
@@ -5478,9 +5536,10 @@
 ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
 ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
 
-ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
-ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
 ACCESSORS(AccessCheckInfo, callback, Object, kCallbackOffset)
+ACCESSORS(AccessCheckInfo, named_interceptor, Object, kNamedInterceptorOffset)
+ACCESSORS(AccessCheckInfo, indexed_interceptor, Object,
+          kIndexedInterceptorOffset)
 ACCESSORS(AccessCheckInfo, data, Object, kDataOffset)
 
 ACCESSORS(InterceptorInfo, getter, Object, kGetterOffset)
@@ -5521,6 +5580,9 @@
           kInstanceCallHandlerOffset)
 ACCESSORS(FunctionTemplateInfo, access_check_info, Object,
           kAccessCheckInfoOffset)
+ACCESSORS(FunctionTemplateInfo, shared_function_info, Object,
+          kSharedFunctionInfoOffset)
+
 SMI_ACCESSORS(FunctionTemplateInfo, flag, kFlagOffset)
 
 ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
@@ -5601,8 +5663,8 @@
 ACCESSORS(SharedFunctionInfo, optimized_code_map, FixedArray,
           kOptimizedCodeMapOffset)
 ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
-ACCESSORS(SharedFunctionInfo, feedback_vector, TypeFeedbackVector,
-          kFeedbackVectorOffset)
+ACCESSORS(SharedFunctionInfo, feedback_metadata, TypeFeedbackMetadata,
+          kFeedbackMetadataOffset)
 #if TRACE_MAPS
 SMI_ACCESSORS(SharedFunctionInfo, unique_id, kUniqueIdOffset)
 #endif
@@ -5626,7 +5688,6 @@
                kRemovePrototypeBit)
 BOOL_ACCESSORS(FunctionTemplateInfo, flag, do_not_cache,
                kDoNotCacheBit)
-BOOL_ACCESSORS(FunctionTemplateInfo, flag, instantiated, kInstantiatedBit)
 BOOL_ACCESSORS(FunctionTemplateInfo, flag, accept_any_receiver,
                kAcceptAnyReceiver)
 BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_named_expression,
@@ -5941,7 +6002,7 @@
 }
 
 void SharedFunctionInfo::set_api_func_data(FunctionTemplateInfo* data) {
-  DCHECK(function_data()->IsUndefined());
+  DCHECK(function_data()->IsUndefined(GetIsolate()));
   set_function_data(data);
 }
 
@@ -5955,12 +6016,12 @@
 }
 
 void SharedFunctionInfo::set_bytecode_array(BytecodeArray* bytecode) {
-  DCHECK(function_data()->IsUndefined());
+  DCHECK(function_data()->IsUndefined(GetIsolate()));
   set_function_data(bytecode);
 }
 
 void SharedFunctionInfo::ClearBytecodeArray() {
-  DCHECK(function_data()->IsUndefined() || HasBytecodeArray());
+  DCHECK(function_data()->IsUndefined(GetIsolate()) || HasBytecodeArray());
   set_function_data(GetHeap()->undefined_value());
 }
 
@@ -5986,12 +6047,13 @@
   if (HasInferredName()) {
     return String::cast(function_identifier());
   }
-  DCHECK(function_identifier()->IsUndefined() || HasBuiltinFunctionId());
-  return GetIsolate()->heap()->empty_string();
+  Isolate* isolate = GetIsolate();
+  DCHECK(function_identifier()->IsUndefined(isolate) || HasBuiltinFunctionId());
+  return isolate->heap()->empty_string();
 }
 
 void SharedFunctionInfo::set_inferred_name(String* inferred_name) {
-  DCHECK(function_identifier()->IsUndefined() || HasInferredName());
+  DCHECK(function_identifier()->IsUndefined(GetIsolate()) || HasInferredName());
   set_function_identifier(inferred_name);
 }
 
@@ -6077,7 +6139,7 @@
 
 bool SharedFunctionInfo::IsBuiltin() {
   Object* script_obj = script();
-  if (script_obj->IsUndefined()) return true;
+  if (script_obj->IsUndefined(GetIsolate())) return true;
   Script* script = Script::cast(script_obj);
   Script::Type type = static_cast<Script::Type>(script->type());
   return type != Script::TYPE_NORMAL;
@@ -6142,7 +6204,7 @@
 
 AbstractCode* JSFunction::abstract_code() {
   Code* code = this->code();
-  if (code->is_interpreter_entry_trampoline()) {
+  if (code->is_interpreter_trampoline_builtin()) {
     return AbstractCode::cast(shared()->bytecode_array());
   } else {
     return AbstractCode::cast(code);
@@ -6210,7 +6272,7 @@
 
 
 void JSFunction::set_context(Object* value) {
-  DCHECK(value->IsUndefined() || value->IsContext());
+  DCHECK(value->IsUndefined(GetIsolate()) || value->IsContext());
   WRITE_FIELD(this, kContextOffset, value);
   WRITE_BARRIER(GetHeap(), this, kContextOffset, value);
 }
@@ -6230,7 +6292,8 @@
 
 
 bool JSFunction::has_instance_prototype() {
-  return has_initial_map() || !prototype_or_initial_map()->IsTheHole();
+  return has_initial_map() ||
+         !prototype_or_initial_map()->IsTheHole(GetIsolate());
 }
 
 
@@ -6270,12 +6333,11 @@
          code() != builtins->builtin(Builtins::kCompileOptimizedConcurrent);
 }
 
-
-int JSFunction::NumberOfLiterals() {
-  return literals()->length();
+TypeFeedbackVector* JSFunction::feedback_vector() {
+  LiteralsArray* array = literals();
+  return array->feedback_vector();
 }
 
-
 ACCESSORS(JSProxy, target, JSReceiver, kTargetOffset)
 ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
 ACCESSORS(JSProxy, hash, Object, kHashOffset)
@@ -6321,22 +6383,22 @@
 ACCESSORS(JSGeneratorObject, function, JSFunction, kFunctionOffset)
 ACCESSORS(JSGeneratorObject, context, Context, kContextOffset)
 ACCESSORS(JSGeneratorObject, receiver, Object, kReceiverOffset)
-ACCESSORS(JSGeneratorObject, input, Object, kInputOffset)
+ACCESSORS(JSGeneratorObject, input_or_debug_pos, Object, kInputOrDebugPosOffset)
 SMI_ACCESSORS(JSGeneratorObject, resume_mode, kResumeModeOffset)
 SMI_ACCESSORS(JSGeneratorObject, continuation, kContinuationOffset)
 ACCESSORS(JSGeneratorObject, operand_stack, FixedArray, kOperandStackOffset)
 
-bool JSGeneratorObject::is_suspended() {
+bool JSGeneratorObject::is_suspended() const {
   DCHECK_LT(kGeneratorExecuting, 0);
   DCHECK_LT(kGeneratorClosed, 0);
   return continuation() >= 0;
 }
 
-bool JSGeneratorObject::is_closed() {
+bool JSGeneratorObject::is_closed() const {
   return continuation() == kGeneratorClosed;
 }
 
-bool JSGeneratorObject::is_executing() {
+bool JSGeneratorObject::is_executing() const {
   return continuation() == kGeneratorExecuting;
 }
 
@@ -6387,7 +6449,6 @@
 ACCESSORS(Code, raw_type_feedback_info, Object, kTypeFeedbackInfoOffset)
 ACCESSORS(Code, next_code_link, Object, kNextCodeLinkOffset)
 
-
 void Code::WipeOutHeader() {
   WRITE_FIELD(this, kRelocationInfoOffset, NULL);
   WRITE_FIELD(this, kHandlerTableOffset, NULL);
@@ -6441,11 +6502,47 @@
   return instruction_start() + instruction_size();
 }
 
-
-int Code::body_size() {
-  return RoundUp(instruction_size(), kObjectAlignment);
+int Code::GetUnwindingInfoSizeOffset() const {
+  DCHECK(has_unwinding_info());
+  return RoundUp(kHeaderSize + instruction_size(), kInt64Size);
 }
 
+int Code::unwinding_info_size() const {
+  DCHECK(has_unwinding_info());
+  return static_cast<int>(
+      READ_UINT64_FIELD(this, GetUnwindingInfoSizeOffset()));
+}
+
+void Code::set_unwinding_info_size(int value) {
+  DCHECK(has_unwinding_info());
+  WRITE_UINT64_FIELD(this, GetUnwindingInfoSizeOffset(), value);
+}
+
+byte* Code::unwinding_info_start() {
+  DCHECK(has_unwinding_info());
+  return FIELD_ADDR(this, GetUnwindingInfoSizeOffset()) + kInt64Size;
+}
+
+byte* Code::unwinding_info_end() {
+  DCHECK(has_unwinding_info());
+  return unwinding_info_start() + unwinding_info_size();
+}
+
+int Code::body_size() {
+  int unpadded_body_size =
+      has_unwinding_info()
+          ? static_cast<int>(unwinding_info_end() - instruction_start())
+          : instruction_size();
+  return RoundUp(unpadded_body_size, kObjectAlignment);
+}
+
+int Code::SizeIncludingMetadata() {
+  int size = CodeSize();
+  size += relocation_info()->Size();
+  size += deoptimization_data()->Size();
+  size += handler_table()->Size();
+  return size;
+}
 
 ByteArray* Code::unchecked_relocation_info() {
   return reinterpret_cast<ByteArray*>(READ_FIELD(this, kRelocationInfoOffset));
@@ -6620,7 +6717,7 @@
 
 JSRegExp::Type JSRegExp::TypeTag() {
   Object* data = this->data();
-  if (data->IsUndefined()) return JSRegExp::NOT_COMPILED;
+  if (data->IsUndefined(GetIsolate())) return JSRegExp::NOT_COMPILED;
   Smi* smi = Smi::cast(FixedArray::cast(data)->get(kTagIndex));
   return static_cast<JSRegExp::Type>(smi->value());
 }
@@ -6678,16 +6775,18 @@
   // pointer may point to a one pointer filler map.
   if (ElementsAreSafeToExamine()) {
     Map* map = fixed_array->map();
-    DCHECK((IsFastSmiOrObjectElementsKind(kind) &&
-            (map == GetHeap()->fixed_array_map() ||
-             map == GetHeap()->fixed_cow_array_map())) ||
-           (IsFastDoubleElementsKind(kind) &&
-            (fixed_array->IsFixedDoubleArray() ||
-             fixed_array == GetHeap()->empty_fixed_array())) ||
-           (kind == DICTIONARY_ELEMENTS &&
-            fixed_array->IsFixedArray() &&
-            fixed_array->IsDictionary()) ||
-           (kind > DICTIONARY_ELEMENTS));
+    if (IsFastSmiOrObjectElementsKind(kind)) {
+      DCHECK(map == GetHeap()->fixed_array_map() ||
+             map == GetHeap()->fixed_cow_array_map());
+    } else if (IsFastDoubleElementsKind(kind)) {
+      DCHECK(fixed_array->IsFixedDoubleArray() ||
+             fixed_array == GetHeap()->empty_fixed_array());
+    } else if (kind == DICTIONARY_ELEMENTS) {
+      DCHECK(fixed_array->IsFixedArray());
+      DCHECK(fixed_array->IsDictionary());
+    } else {
+      DCHECK(kind > DICTIONARY_ELEMENTS);
+    }
     DCHECK(!IsSloppyArgumentsElements(kind) ||
            (elements()->IsFixedArray() && elements()->length() >= 2));
   }
@@ -7216,19 +7315,20 @@
   return iter.GetCurrent() != global;
 }
 
-
-Handle<Smi> JSReceiver::GetOrCreateIdentityHash(Handle<JSReceiver> object) {
-  return object->IsJSProxy()
-      ? JSProxy::GetOrCreateIdentityHash(Handle<JSProxy>::cast(object))
-      : JSObject::GetOrCreateIdentityHash(Handle<JSObject>::cast(object));
+Smi* JSReceiver::GetOrCreateIdentityHash(Isolate* isolate,
+                                         Handle<JSReceiver> object) {
+  return object->IsJSProxy() ? JSProxy::GetOrCreateIdentityHash(
+                                   isolate, Handle<JSProxy>::cast(object))
+                             : JSObject::GetOrCreateIdentityHash(
+                                   isolate, Handle<JSObject>::cast(object));
 }
 
-Handle<Object> JSReceiver::GetIdentityHash(Isolate* isolate,
-                                           Handle<JSReceiver> receiver) {
-  return receiver->IsJSProxy() ? JSProxy::GetIdentityHash(
-                                     isolate, Handle<JSProxy>::cast(receiver))
-                               : JSObject::GetIdentityHash(
-                                     isolate, Handle<JSObject>::cast(receiver));
+Object* JSReceiver::GetIdentityHash(Isolate* isolate,
+                                    Handle<JSReceiver> receiver) {
+  return receiver->IsJSProxy()
+             ? JSProxy::GetIdentityHash(Handle<JSProxy>::cast(receiver))
+             : JSObject::GetIdentityHash(isolate,
+                                         Handle<JSObject>::cast(receiver));
 }
 
 
@@ -7276,6 +7376,9 @@
   set_flag(AttributesField::update(flag(), attributes));
 }
 
+bool FunctionTemplateInfo::IsTemplateFor(JSObject* object) {
+  return IsTemplateFor(object->map());
+}
 
 bool AccessorInfo::IsCompatibleReceiver(Object* receiver) {
   if (!HasExpectedReceiverType()) return true;
@@ -7305,8 +7408,9 @@
 
 
 void AccessorPair::SetComponents(Object* getter, Object* setter) {
-  if (!getter->IsNull()) set_getter(getter);
-  if (!setter->IsNull()) set_setter(setter);
+  Isolate* isolate = GetIsolate();
+  if (!getter->IsNull(isolate)) set_getter(getter);
+  if (!setter->IsNull(isolate)) set_setter(setter);
 }
 
 
@@ -7326,7 +7430,7 @@
 
 
 bool AccessorPair::IsJSAccessor(Object* obj) {
-  return obj->IsCallable() || obj->IsUndefined();
+  return obj->IsCallable() || obj->IsUndefined(GetIsolate());
 }
 
 
@@ -7358,9 +7462,9 @@
   int index = dict->EntryToIndex(entry);
   DisallowHeapAllocation no_gc;
   WriteBarrierMode mode = dict->GetWriteBarrierMode(no_gc);
-  dict->set(index, *key, mode);
-  dict->set(index + 1, *value, mode);
-  dict->set(index + 2, details.AsSmi());
+  dict->set(index + Dictionary::kEntryKeyIndex, *key, mode);
+  dict->set(index + Dictionary::kEntryValueIndex, *value, mode);
+  dict->set(index + Dictionary::kEntryDetailsIndex, details.AsSmi());
 }
 
 
@@ -7374,8 +7478,8 @@
   int index = dict->EntryToIndex(entry);
   DisallowHeapAllocation no_gc;
   WriteBarrierMode mode = dict->GetWriteBarrierMode(no_gc);
-  dict->set(index, *key, mode);
-  dict->set(index + 1, *value, mode);
+  dict->set(index + Dictionary::kEntryKeyIndex, *key, mode);
+  dict->set(index + Dictionary::kEntryValueIndex, *value, mode);
   PropertyCell::cast(*value)->set_property_details(details);
 }
 
@@ -7471,7 +7575,8 @@
 template <typename Dictionary>
 bool GlobalDictionaryShape::IsDeleted(Dictionary* dict, int entry) {
   DCHECK(dict->ValueAt(entry)->IsPropertyCell());
-  return PropertyCell::cast(dict->ValueAt(entry))->value()->IsTheHole();
+  Isolate* isolate = dict->GetIsolate();
+  return PropertyCell::cast(dict->ValueAt(entry))->value()->IsTheHole(isolate);
 }
 
 
@@ -7745,7 +7850,7 @@
   TableType* table(TableType::cast(this->table()));
   int index = Smi::cast(this->index())->value();
   Object* key = table->KeyAt(index);
-  DCHECK(!key->IsTheHole());
+  DCHECK(!key->IsTheHole(table->GetIsolate()));
   return key;
 }
 
@@ -7765,7 +7870,7 @@
   OrderedHashMap* table(OrderedHashMap::cast(this->table()));
   int index = Smi::cast(this->index())->value();
   Object* value = table->ValueAt(index);
-  DCHECK(!value->IsTheHole());
+  DCHECK(!value->IsTheHole(table->GetIsolate()));
   return value;
 }
 
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 551beb2..464177b 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -106,6 +106,8 @@
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
     case JS_PROMISE_TYPE:
+    case JS_ARGUMENTS_TYPE:
+    case JS_ERROR_TYPE:
       JSObject::cast(this)->JSObjectPrint(os);
       break;
     case JS_ARRAY_TYPE:
@@ -380,8 +382,7 @@
 
     case DICTIONARY_ELEMENTS:
     case SLOW_STRING_WRAPPER_ELEMENTS:
-      os << "\n - elements: ";
-      elements()->Print(os);
+      SeededNumberDictionary::cast(elements())->Print(os);
       break;
     case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
     case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
@@ -424,10 +425,14 @@
 
 static void JSObjectPrintBody(std::ostream& os, JSObject* obj,  // NOLINT
                               bool print_elements = true) {
-  os << "\n {";
+  os << "\n - properties = {";
   obj->PrintProperties(os);
-  if (print_elements) obj->PrintElements(os);
   os << "\n }\n";
+  if (print_elements && obj->elements()->length() > 0) {
+    os << " - elements = {";
+    obj->PrintElements(os);
+    os << "\n }\n";
+  }
 }
 
 
@@ -462,7 +467,7 @@
   HeapObject::PrintHeader(os, "Symbol");
   os << "\n - hash: " << Hash();
   os << "\n - name: " << Brief(name());
-  if (name()->IsUndefined()) {
+  if (name()->IsUndefined(GetIsolate())) {
     os << " (" << PrivateSymbolToName() << ")";
   }
   os << "\n - private: " << is_private();
@@ -575,6 +580,40 @@
   os << "\n";
 }
 
+template void FeedbackVectorSpecBase<StaticFeedbackVectorSpec>::Print();
+template void FeedbackVectorSpecBase<FeedbackVectorSpec>::Print();
+
+template <typename Derived>
+void FeedbackVectorSpecBase<Derived>::Print() {
+  OFStream os(stdout);
+  FeedbackVectorSpecPrint(os);
+  os << std::flush;
+}
+
+template <typename Derived>
+void FeedbackVectorSpecBase<Derived>::FeedbackVectorSpecPrint(
+    std::ostream& os) {  // NOLINT
+  int slot_count = This()->slots();
+  os << " - slot_count: " << slot_count;
+  if (slot_count == 0) {
+    os << " (empty)\n";
+    return;
+  }
+
+  for (int slot = 0, name_index = 0; slot < slot_count;) {
+    FeedbackVectorSlotKind kind = This()->GetKind(slot);
+    int entry_size = TypeFeedbackMetadata::GetSlotSize(kind);
+    DCHECK_LT(0, entry_size);
+
+    os << "\n Slot #" << slot << " " << kind;
+    if (TypeFeedbackMetadata::SlotRequiresName(kind)) {
+      os << ", " << Brief(*This()->GetName(name_index++));
+    }
+
+    slot += entry_size;
+  }
+  os << "\n";
+}
 
 void TypeFeedbackMetadata::Print() {
   OFStream os(stdout);
@@ -591,12 +630,16 @@
     os << " (empty)\n";
     return;
   }
+  os << "\n - slot_count: " << slot_count();
 
   TypeFeedbackMetadataIterator iter(this);
   while (iter.HasNext()) {
     FeedbackVectorSlot slot = iter.Next();
     FeedbackVectorSlotKind kind = iter.kind();
     os << "\n Slot " << slot << " " << kind;
+    if (TypeFeedbackMetadata::SlotRequiresName(kind)) {
+      os << ", " << Brief(iter.name());
+    }
   }
   os << "\n";
 }
@@ -622,13 +665,22 @@
     FeedbackVectorSlot slot = iter.Next();
     FeedbackVectorSlotKind kind = iter.kind();
 
-    os << "\n Slot " << slot << " " << kind << " ";
+    os << "\n Slot " << slot << " " << kind;
+    if (TypeFeedbackMetadata::SlotRequiresName(kind)) {
+      os << ", " << Brief(iter.name());
+    }
+    os << " ";
     switch (kind) {
       case FeedbackVectorSlotKind::LOAD_IC: {
         LoadICNexus nexus(this, slot);
         os << Code::ICState2String(nexus.StateFromFeedback());
         break;
       }
+      case FeedbackVectorSlotKind::LOAD_GLOBAL_IC: {
+        LoadGlobalICNexus nexus(this, slot);
+        os << Code::ICState2String(nexus.StateFromFeedback());
+        break;
+      }
       case FeedbackVectorSlotKind::KEYED_LOAD_IC: {
         KeyedLoadICNexus nexus(this, slot);
         os << Code::ICState2String(nexus.StateFromFeedback());
@@ -716,8 +768,6 @@
 void Name::NamePrint(std::ostream& os) {  // NOLINT
   if (IsString()) {
     String::cast(this)->StringPrint(os);
-  } else if (IsSymbol()) {
-    Symbol::cast(this)->name()->Print(os);
   } else {
     os << Brief(this);
   }
@@ -875,6 +925,8 @@
      << shared()->internal_formal_parameter_count();
   if (shared()->is_generator()) {
     os << "\n   - generator";
+  } else if (shared()->is_async()) {
+    os << "\n   - async";
   }
   os << "\n - context = " << Brief(context());
   os << "\n - literals = " << Brief(literals());
@@ -916,9 +968,10 @@
   os << "\n - end position = " << end_position();
   os << "\n - debug info = " << Brief(debug_info());
   os << "\n - length = " << length();
+  os << "\n - num_literals = " << num_literals();
   os << "\n - optimized_code_map = " << Brief(optimized_code_map());
-  os << "\n - feedback_vector = ";
-  feedback_vector()->TypeFeedbackVectorPrint(os);
+  os << "\n - feedback_metadata = ";
+  feedback_metadata()->TypeFeedbackMetadataPrint(os);
   if (HasBytecodeArray()) {
     os << "\n - bytecode_array = " << bytecode_array();
   }
@@ -1032,9 +1085,9 @@
 
 void AccessCheckInfo::AccessCheckInfoPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "AccessCheckInfo");
-  os << "\n - named_callback: " << Brief(named_callback());
-  os << "\n - indexed_callback: " << Brief(indexed_callback());
   os << "\n - callback: " << Brief(callback());
+  os << "\n - named_interceptor: " << Brief(named_interceptor());
+  os << "\n - indexed_interceptor: " << Brief(indexed_interceptor());
   os << "\n - data: " << Brief(data());
   os << "\n";
 }
@@ -1187,7 +1240,7 @@
 
 void LayoutDescriptor::Print(std::ostream& os) {  // NOLINT
   os << "Layout descriptor: ";
-  if (IsUninitialized()) {
+  if (IsOddball() && IsUninitialized(HeapObject::cast(this)->GetIsolate())) {
     os << "<uninitialized>";
   } else if (IsFastPointerLayout()) {
     os << "<all tagged>";
@@ -1218,7 +1271,7 @@
   } else {
     DCHECK(this->IsSymbol());
     Symbol* s = Symbol::cast(this);
-    if (s->name()->IsUndefined()) {
+    if (s->name()->IsUndefined(GetIsolate())) {
       PrintF("#<%s>", s->PrivateSymbolToName());
     } else {
       PrintF("<%s>", String::cast(s->name())->ToCString().get());
@@ -1233,7 +1286,7 @@
   } else {
     DCHECK(this->IsSymbol());
     Symbol* s = Symbol::cast(this);
-    if (s->name()->IsUndefined()) {
+    if (s->name()->IsUndefined(GetIsolate())) {
       return SNPrintF(str, "#<%s>", s->PrivateSymbolToName());
     } else {
       return SNPrintF(str, "<%s>", String::cast(s->name())->ToCString().get());
diff --git a/src/objects.cc b/src/objects.cc
index addf97a..fb5bb5e 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -12,7 +12,7 @@
 
 #include "src/accessors.h"
 #include "src/allocation-site-scopes.h"
-#include "src/api-arguments.h"
+#include "src/api-arguments-inl.h"
 #include "src/api-natives.h"
 #include "src/api.h"
 #include "src/base/bits.h"
@@ -47,7 +47,6 @@
 #include "src/macro-assembler.h"
 #include "src/messages.h"
 #include "src/objects-body-descriptors-inl.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/property-descriptor.h"
 #include "src/prototype.h"
 #include "src/regexp/jsregexp.h"
@@ -124,7 +123,7 @@
                                                 Handle<Object> object) {
   if (object->IsJSReceiver()) return Handle<JSReceiver>::cast(object);
   if (*object == isolate->heap()->null_value() ||
-      *object == isolate->heap()->undefined_value()) {
+      object->IsUndefined(isolate)) {
     return isolate->global_proxy();
   }
   return Object::ToObject(isolate, object);
@@ -231,9 +230,11 @@
 
 
 bool Object::BooleanValue() {
-  if (IsBoolean()) return IsTrue();
   if (IsSmi()) return Smi::cast(this)->value() != 0;
-  if (IsUndefined() || IsNull()) return false;
+  DCHECK(IsHeapObject());
+  Isolate* isolate = HeapObject::cast(this)->GetIsolate();
+  if (IsBoolean()) return IsTrue(isolate);
+  if (IsUndefined(isolate) || IsNull(isolate)) return false;
   if (IsUndetectable()) return false;  // Undetectable object is false.
   if (IsString()) return String::cast(this)->length() != 0;
   if (IsHeapNumber()) return HeapNumber::cast(this)->HeapNumberBooleanValue();
@@ -613,30 +614,28 @@
 // static
 MaybeHandle<Object> Object::InstanceOf(Isolate* isolate, Handle<Object> object,
                                        Handle<Object> callable) {
-  if (FLAG_harmony_instanceof) {
-    // The {callable} must be a receiver.
-    if (!callable->IsJSReceiver()) {
-      THROW_NEW_ERROR(
-          isolate, NewTypeError(MessageTemplate::kNonObjectInInstanceOfCheck),
-          Object);
-    }
+  // The {callable} must be a receiver.
+  if (!callable->IsJSReceiver()) {
+    THROW_NEW_ERROR(isolate,
+                    NewTypeError(MessageTemplate::kNonObjectInInstanceOfCheck),
+                    Object);
+  }
 
-    // Lookup the @@hasInstance method on {callable}.
-    Handle<Object> inst_of_handler;
+  // Lookup the @@hasInstance method on {callable}.
+  Handle<Object> inst_of_handler;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, inst_of_handler,
+      JSReceiver::GetMethod(Handle<JSReceiver>::cast(callable),
+                            isolate->factory()->has_instance_symbol()),
+      Object);
+  if (!inst_of_handler->IsUndefined(isolate)) {
+    // Call the {inst_of_handler} on the {callable}.
+    Handle<Object> result;
     ASSIGN_RETURN_ON_EXCEPTION(
-        isolate, inst_of_handler,
-        JSReceiver::GetMethod(Handle<JSReceiver>::cast(callable),
-                              isolate->factory()->has_instance_symbol()),
+        isolate, result,
+        Execution::Call(isolate, inst_of_handler, callable, 1, &object),
         Object);
-    if (!inst_of_handler->IsUndefined()) {
-      // Call the {inst_of_handler} on the {callable}.
-      Handle<Object> result;
-      ASSIGN_RETURN_ON_EXCEPTION(
-          isolate, result,
-          Execution::Call(isolate, inst_of_handler, callable, 1, &object),
-          Object);
-      return isolate->factory()->ToBoolean(result->BooleanValue());
-    }
+    return isolate->factory()->ToBoolean(result->BooleanValue());
   }
 
   // The {callable} must have a [[Call]] internal method.
@@ -671,20 +670,6 @@
 }
 
 
-bool Object::IsPromise(Handle<Object> object) {
-  if (!object->IsJSObject()) return false;
-  auto js_object = Handle<JSObject>::cast(object);
-  // Promises can't have access checks.
-  if (js_object->map()->is_access_check_needed()) return false;
-  auto isolate = js_object->GetIsolate();
-  // TODO(dcarney): this should just be read from the symbol registry so as not
-  // to be context dependent.
-  auto key = isolate->factory()->promise_state_symbol();
-  // Shouldn't be possible to throw here.
-  return JSObject::HasRealNamedProperty(js_object, key).FromJust();
-}
-
-
 // static
 MaybeHandle<Object> Object::GetMethod(Handle<JSReceiver> receiver,
                                       Handle<Name> name) {
@@ -692,7 +677,7 @@
   Isolate* isolate = receiver->GetIsolate();
   ASSIGN_RETURN_ON_EXCEPTION(isolate, func,
                              JSReceiver::GetProperty(receiver, name), Object);
-  if (func->IsNull() || func->IsUndefined()) {
+  if (func->IsNull(isolate) || func->IsUndefined(isolate)) {
     return isolate->factory()->undefined_value();
   }
   if (!func->IsCallable()) {
@@ -719,14 +704,9 @@
   }
   // 4. Let len be ? ToLength(? Get(obj, "length")).
   Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
-  Handle<Object> raw_length_obj;
-  ASSIGN_RETURN_ON_EXCEPTION(
-      isolate, raw_length_obj,
-      JSReceiver::GetProperty(receiver, isolate->factory()->length_string()),
-      FixedArray);
   Handle<Object> raw_length_number;
   ASSIGN_RETURN_ON_EXCEPTION(isolate, raw_length_number,
-                             Object::ToLength(isolate, raw_length_obj),
+                             Object::GetLengthFromArrayLike(isolate, receiver),
                              FixedArray);
   uint32_t len;
   if (!raw_length_number->ToUint32(&len) ||
@@ -773,6 +753,16 @@
 
 
 // static
+MaybeHandle<Object> Object::GetLengthFromArrayLike(Isolate* isolate,
+                                                   Handle<Object> object) {
+  Handle<Object> val;
+  Handle<Object> key = isolate->factory()->length_string();
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, val, Runtime::GetObjectProperty(isolate, object, key), Object);
+  return Object::ToLength(isolate, val);
+}
+
+// static
 Maybe<bool> JSReceiver::HasProperty(LookupIterator* it) {
   for (; it->IsFound(); it->Next()) {
     switch (it->state()) {
@@ -882,7 +872,7 @@
       isolate, trap,
       Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name), Object);
   // 7. If trap is undefined, then
-  if (trap->IsUndefined()) {
+  if (trap->IsUndefined(isolate)) {
     // 7.a Return target.[[Get]](P, Receiver).
     LookupIterator it =
         LookupIterator::PropertyOrElement(isolate, receiver, name, target);
@@ -922,8 +912,8 @@
     // 10.b.i. If trapResult is not undefined, throw a TypeError exception.
     inconsistent = PropertyDescriptor::IsAccessorDescriptor(&target_desc) &&
                    !target_desc.configurable() &&
-                   target_desc.get()->IsUndefined() &&
-                   !trap_result->IsUndefined();
+                   target_desc.get()->IsUndefined(isolate) &&
+                   !trap_result->IsUndefined(isolate);
     if (inconsistent) {
       THROW_NEW_ERROR(
           isolate,
@@ -982,13 +972,41 @@
   return false;
 }
 
+Handle<SharedFunctionInfo> FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
+    Isolate* isolate, Handle<FunctionTemplateInfo> info) {
+  Object* current_info = info->shared_function_info();
+  if (current_info->IsSharedFunctionInfo()) {
+    return handle(SharedFunctionInfo::cast(current_info), isolate);
+  }
 
-bool FunctionTemplateInfo::IsTemplateFor(Object* object) {
-  if (!object->IsHeapObject()) return false;
-  return IsTemplateFor(HeapObject::cast(object)->map());
+  Handle<Object> class_name(info->class_name(), isolate);
+  Handle<String> name = class_name->IsString()
+                            ? Handle<String>::cast(class_name)
+                            : isolate->factory()->empty_string();
+  Handle<Code> code;
+  if (info->call_code()->IsCallHandlerInfo() &&
+      CallHandlerInfo::cast(info->call_code())->fast_handler()->IsCode()) {
+    code = isolate->builtins()->HandleFastApiCall();
+  } else {
+    code = isolate->builtins()->HandleApiCall();
+  }
+  bool is_constructor = !info->remove_prototype();
+  Handle<SharedFunctionInfo> result =
+      isolate->factory()->NewSharedFunctionInfo(name, code, is_constructor);
+  if (is_constructor) {
+    result->set_construct_stub(*isolate->builtins()->JSConstructStubApi());
+  }
+
+  result->set_length(info->length());
+  if (class_name->IsString()) result->set_instance_class_name(*class_name);
+  result->set_api_func_data(*info);
+  result->DontAdaptArguments();
+  DCHECK(result->IsApiFunction());
+
+  info->set_shared_function_info(*result);
+  return result;
 }
 
-
 bool FunctionTemplateInfo::IsTemplateFor(Map* map) {
   // There is a constraint on the object; check.
   if (!map->IsJSObjectMap()) return false;
@@ -1008,26 +1026,6 @@
 }
 
 
-// TODO(dcarney): CallOptimization duplicates this logic, merge.
-Object* FunctionTemplateInfo::GetCompatibleReceiver(Isolate* isolate,
-                                                    Object* receiver) {
-  // API calls are only supported with JSObject receivers.
-  if (!receiver->IsJSObject()) return isolate->heap()->null_value();
-  Object* recv_type = this->signature();
-  // No signature, return holder.
-  if (recv_type->IsUndefined()) return receiver;
-  FunctionTemplateInfo* signature = FunctionTemplateInfo::cast(recv_type);
-  // Check the receiver.
-  for (PrototypeIterator iter(isolate, JSObject::cast(receiver),
-                              PrototypeIterator::START_AT_RECEIVER,
-                              PrototypeIterator::END_AT_NON_HIDDEN);
-       !iter.IsAtEnd(); iter.Advance()) {
-    if (signature->IsTemplateFor(iter.GetCurrent())) return iter.GetCurrent();
-  }
-  return isolate->heap()->null_value();
-}
-
-
 // static
 MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
                                     Handle<JSReceiver> new_target,
@@ -1094,7 +1092,7 @@
   ASSIGN_RETURN_ON_EXCEPTION(isolate, trap, GetMethod(handler, trap_name),
                              Object);
   // 6. If trap is undefined, then return target.[[GetPrototypeOf]]().
-  if (trap->IsUndefined()) {
+  if (trap->IsUndefined(isolate)) {
     return JSReceiver::GetPrototype(isolate, target);
   }
   // 7. Let handlerProto be ? Call(trap, handler, «target»).
@@ -1104,7 +1102,7 @@
       isolate, handler_proto,
       Execution::Call(isolate, trap, handler, arraysize(argv), argv), Object);
   // 8. If Type(handlerProto) is neither Object nor Null, throw a TypeError.
-  if (!(handler_proto->IsJSReceiver() || handler_proto->IsNull())) {
+  if (!(handler_proto->IsJSReceiver() || handler_proto->IsNull(isolate))) {
     THROW_NEW_ERROR(isolate,
                     NewTypeError(MessageTemplate::kProxyGetPrototypeOfInvalid),
                     Object);
@@ -1172,16 +1170,9 @@
   // Regular accessor.
   Handle<Object> getter(AccessorPair::cast(*structure)->getter(), isolate);
   if (getter->IsFunctionTemplateInfo()) {
-    auto result = Builtins::InvokeApiFunction(
-        Handle<FunctionTemplateInfo>::cast(getter), receiver, 0, nullptr);
-    if (isolate->has_pending_exception()) {
-      return MaybeHandle<Object>();
-    }
-    Handle<Object> return_value;
-    if (result.ToHandle(&return_value)) {
-      return_value->VerifyApiCallResultType();
-      return handle(*return_value, isolate);
-    }
+    return Builtins::InvokeApiFunction(
+        isolate, Handle<FunctionTemplateInfo>::cast(getter), receiver, 0,
+        nullptr);
   } else if (getter->IsCallable()) {
     // TODO(rossberg): nicer would be to cast to some JSCallable here...
     return Object::GetPropertyWithDefinedGetter(
@@ -1261,12 +1252,11 @@
   Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate);
   if (setter->IsFunctionTemplateInfo()) {
     Handle<Object> argv[] = {value};
-    auto result =
-        Builtins::InvokeApiFunction(Handle<FunctionTemplateInfo>::cast(setter),
-                                    receiver, arraysize(argv), argv);
-    if (isolate->has_pending_exception()) {
-      return Nothing<bool>();
-    }
+    RETURN_ON_EXCEPTION_VALUE(
+        isolate, Builtins::InvokeApiFunction(
+                     isolate, Handle<FunctionTemplateInfo>::cast(setter),
+                     receiver, arraysize(argv), argv),
+        Nothing<bool>());
     return Just(true);
   } else if (setter->IsCallable()) {
     // TODO(rossberg): nicer would be to cast to some JSCallable here...
@@ -1318,18 +1308,6 @@
 
 
 // static
-bool Object::IsErrorObject(Isolate* isolate, Handle<Object> object) {
-  if (!object->IsJSObject()) return false;
-  // Use stack_trace_symbol as proxy for [[ErrorData]].
-  Handle<Name> symbol = isolate->factory()->stack_trace_symbol();
-  Maybe<bool> has_stack_trace =
-      JSReceiver::HasOwnProperty(Handle<JSReceiver>::cast(object), symbol);
-  DCHECK(!has_stack_trace.IsNothing());
-  return has_stack_trace.FromJust();
-}
-
-
-// static
 bool JSObject::AllCanRead(LookupIterator* it) {
   // Skip current iteration, it's in state ACCESS_CHECK or INTERCEPTOR, both of
   // which have already been checked.
@@ -1351,19 +1329,191 @@
   return false;
 }
 
+namespace {
+
+MaybeHandle<Object> GetPropertyWithInterceptorInternal(
+    LookupIterator* it, Handle<InterceptorInfo> interceptor, bool* done) {
+  *done = false;
+  Isolate* isolate = it->isolate();
+  // Make sure that the top context does not change when doing callbacks or
+  // interceptor calls.
+  AssertNoContextChange ncc(isolate);
+
+  if (interceptor->getter()->IsUndefined(isolate)) {
+    return isolate->factory()->undefined_value();
+  }
+
+  Handle<JSObject> holder = it->GetHolder<JSObject>();
+  Handle<Object> result;
+  Handle<Object> receiver = it->GetReceiver();
+  if (!receiver->IsJSReceiver()) {
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate, receiver, Object::ConvertReceiver(isolate, receiver), Object);
+  }
+  PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+                                 *holder, Object::DONT_THROW);
+
+  if (it->IsElement()) {
+    uint32_t index = it->index();
+    v8::IndexedPropertyGetterCallback getter =
+        v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
+    result = args.Call(getter, index);
+  } else {
+    Handle<Name> name = it->name();
+    DCHECK(!name->IsPrivate());
+
+    if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
+      return isolate->factory()->undefined_value();
+    }
+
+    v8::GenericNamedPropertyGetterCallback getter =
+        v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
+            interceptor->getter());
+    result = args.Call(getter, name);
+  }
+
+  RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+  if (result.is_null()) return isolate->factory()->undefined_value();
+  *done = true;
+  // Rebox handle before return
+  return handle(*result, isolate);
+}
+
+Maybe<PropertyAttributes> GetPropertyAttributesWithInterceptorInternal(
+    LookupIterator* it, Handle<InterceptorInfo> interceptor) {
+  Isolate* isolate = it->isolate();
+  // Make sure that the top context does not change when doing
+  // callbacks or interceptor calls.
+  AssertNoContextChange ncc(isolate);
+  HandleScope scope(isolate);
+
+  Handle<JSObject> holder = it->GetHolder<JSObject>();
+  if (!it->IsElement() && it->name()->IsSymbol() &&
+      !interceptor->can_intercept_symbols()) {
+    return Just(ABSENT);
+  }
+  Handle<Object> receiver = it->GetReceiver();
+  if (!receiver->IsJSReceiver()) {
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
+                                     Object::ConvertReceiver(isolate, receiver),
+                                     Nothing<PropertyAttributes>());
+  }
+  PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+                                 *holder, Object::DONT_THROW);
+  if (!interceptor->query()->IsUndefined(isolate)) {
+    Handle<Object> result;
+    if (it->IsElement()) {
+      uint32_t index = it->index();
+      v8::IndexedPropertyQueryCallback query =
+          v8::ToCData<v8::IndexedPropertyQueryCallback>(interceptor->query());
+      result = args.Call(query, index);
+    } else {
+      Handle<Name> name = it->name();
+      DCHECK(!name->IsPrivate());
+      v8::GenericNamedPropertyQueryCallback query =
+          v8::ToCData<v8::GenericNamedPropertyQueryCallback>(
+              interceptor->query());
+      result = args.Call(query, name);
+    }
+    if (!result.is_null()) {
+      int32_t value;
+      CHECK(result->ToInt32(&value));
+      return Just(static_cast<PropertyAttributes>(value));
+    }
+  } else if (!interceptor->getter()->IsUndefined(isolate)) {
+    // TODO(verwaest): Use GetPropertyWithInterceptor?
+    Handle<Object> result;
+    if (it->IsElement()) {
+      uint32_t index = it->index();
+      v8::IndexedPropertyGetterCallback getter =
+          v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
+      result = args.Call(getter, index);
+    } else {
+      Handle<Name> name = it->name();
+      DCHECK(!name->IsPrivate());
+      v8::GenericNamedPropertyGetterCallback getter =
+          v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
+              interceptor->getter());
+      result = args.Call(getter, name);
+    }
+    if (!result.is_null()) return Just(DONT_ENUM);
+  }
+
+  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
+  return Just(ABSENT);
+}
+
+Maybe<bool> SetPropertyWithInterceptorInternal(
+    LookupIterator* it, Handle<InterceptorInfo> interceptor,
+    Object::ShouldThrow should_throw, Handle<Object> value) {
+  Isolate* isolate = it->isolate();
+  // Make sure that the top context does not change when doing callbacks or
+  // interceptor calls.
+  AssertNoContextChange ncc(isolate);
+
+  if (interceptor->setter()->IsUndefined(isolate)) return Just(false);
+
+  Handle<JSObject> holder = it->GetHolder<JSObject>();
+  bool result;
+  Handle<Object> receiver = it->GetReceiver();
+  if (!receiver->IsJSReceiver()) {
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
+                                     Object::ConvertReceiver(isolate, receiver),
+                                     Nothing<bool>());
+  }
+  PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+                                 *holder, should_throw);
+
+  if (it->IsElement()) {
+    uint32_t index = it->index();
+    v8::IndexedPropertySetterCallback setter =
+        v8::ToCData<v8::IndexedPropertySetterCallback>(interceptor->setter());
+    // TODO(neis): In the future, we may want to actually return the
+    // interceptor's result, which then should be a boolean.
+    result = !args.Call(setter, index, value).is_null();
+  } else {
+    Handle<Name> name = it->name();
+    DCHECK(!name->IsPrivate());
+
+    if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
+      return Just(false);
+    }
+
+    v8::GenericNamedPropertySetterCallback setter =
+        v8::ToCData<v8::GenericNamedPropertySetterCallback>(
+            interceptor->setter());
+    result = !args.Call(setter, name, value).is_null();
+  }
+
+  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
+  return Just(result);
+}
+
+}  // namespace
 
 MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
     LookupIterator* it) {
+  Isolate* isolate = it->isolate();
   Handle<JSObject> checked = it->GetHolder<JSObject>();
-  while (AllCanRead(it)) {
-    if (it->state() == LookupIterator::ACCESSOR) {
-      return GetPropertyWithAccessor(it);
+  Handle<InterceptorInfo> interceptor =
+      it->GetInterceptorForFailedAccessCheck();
+  if (interceptor.is_null()) {
+    while (AllCanRead(it)) {
+      if (it->state() == LookupIterator::ACCESSOR) {
+        return GetPropertyWithAccessor(it);
+      }
+      DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
+      bool done;
+      Handle<Object> result;
+      ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+                                 GetPropertyWithInterceptor(it, &done), Object);
+      if (done) return result;
     }
-    DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
+  } else {
+    MaybeHandle<Object> result;
     bool done;
-    Handle<Object> result;
-    ASSIGN_RETURN_ON_EXCEPTION(it->isolate(), result,
-                               GetPropertyWithInterceptor(it, &done), Object);
+    result = GetPropertyWithInterceptorInternal(it, interceptor, &done);
+    RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
     if (done) return result;
   }
 
@@ -1374,27 +1524,36 @@
     return it->factory()->undefined_value();
   }
 
-  it->isolate()->ReportFailedAccessCheck(checked);
-  RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
+  isolate->ReportFailedAccessCheck(checked);
+  RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
   return it->factory()->undefined_value();
 }
 
 
 Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithFailedAccessCheck(
     LookupIterator* it) {
+  Isolate* isolate = it->isolate();
   Handle<JSObject> checked = it->GetHolder<JSObject>();
-  while (AllCanRead(it)) {
-    if (it->state() == LookupIterator::ACCESSOR) {
-      return Just(it->property_attributes());
+  Handle<InterceptorInfo> interceptor =
+      it->GetInterceptorForFailedAccessCheck();
+  if (interceptor.is_null()) {
+    while (AllCanRead(it)) {
+      if (it->state() == LookupIterator::ACCESSOR) {
+        return Just(it->property_attributes());
+      }
+      DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
+      auto result = GetPropertyAttributesWithInterceptor(it);
+      if (isolate->has_scheduled_exception()) break;
+      if (result.IsJust() && result.FromJust() != ABSENT) return result;
     }
-    DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
-    auto result = GetPropertyAttributesWithInterceptor(it);
-    if (it->isolate()->has_scheduled_exception()) break;
-    if (result.IsJust() && result.FromJust() != ABSENT) return result;
+  } else {
+    Maybe<PropertyAttributes> result =
+        GetPropertyAttributesWithInterceptorInternal(it, interceptor);
+    RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
+    if (result.FromMaybe(ABSENT) != ABSENT) return result;
   }
-  it->isolate()->ReportFailedAccessCheck(checked);
-  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(),
-                                      Nothing<PropertyAttributes>());
+  isolate->ReportFailedAccessCheck(checked);
+  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
   return Just(ABSENT);
 }
 
@@ -1415,13 +1574,23 @@
 
 Maybe<bool> JSObject::SetPropertyWithFailedAccessCheck(
     LookupIterator* it, Handle<Object> value, ShouldThrow should_throw) {
+  Isolate* isolate = it->isolate();
   Handle<JSObject> checked = it->GetHolder<JSObject>();
-  if (AllCanWrite(it)) {
-    return SetPropertyWithAccessor(it, value, should_throw);
+  Handle<InterceptorInfo> interceptor =
+      it->GetInterceptorForFailedAccessCheck();
+  if (interceptor.is_null()) {
+    if (AllCanWrite(it)) {
+      return SetPropertyWithAccessor(it, value, should_throw);
+    }
+  } else {
+    Maybe<bool> result = SetPropertyWithInterceptorInternal(
+        it, interceptor, should_throw, value);
+    RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+    if (result.IsJust()) return result;
   }
 
-  it->isolate()->ReportFailedAccessCheck(checked);
-  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
+  isolate->ReportFailedAccessCheck(checked);
+  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
   return Just(true);
 }
 
@@ -1441,10 +1610,12 @@
 
     int entry = property_dictionary->FindEntry(name);
     if (entry == GlobalDictionary::kNotFound) {
-      auto cell = object->GetIsolate()->factory()->NewPropertyCell();
+      Isolate* isolate = object->GetIsolate();
+      auto cell = isolate->factory()->NewPropertyCell();
       cell->set_value(*value);
-      auto cell_type = value->IsUndefined() ? PropertyCellType::kUndefined
-                                            : PropertyCellType::kConstant;
+      auto cell_type = value->IsUndefined(isolate)
+                           ? PropertyCellType::kUndefined
+                           : PropertyCellType::kConstant;
       details = details.set_cell_type(cell_type);
       value = cell;
       property_dictionary =
@@ -1475,7 +1646,7 @@
 Maybe<bool> JSReceiver::HasInPrototypeChain(Isolate* isolate,
                                             Handle<JSReceiver> object,
                                             Handle<Object> proto) {
-  PrototypeIterator iter(isolate, object, PrototypeIterator::START_AT_RECEIVER);
+  PrototypeIterator iter(isolate, object, kStartAtReceiver);
   while (true) {
     if (!iter.AdvanceFollowingProxies()) return Nothing<bool>();
     if (iter.IsAtEnd()) return Just(false);
@@ -1509,28 +1680,21 @@
   return isolate->heap()->null_value()->map();
 }
 
+namespace {
 
-Object* Object::GetHash() {
-  Object* hash = GetSimpleHash();
-  if (hash->IsSmi()) return hash;
-
-  DisallowHeapAllocation no_gc;
-  DCHECK(IsJSReceiver());
-  JSReceiver* receiver = JSReceiver::cast(this);
-  Isolate* isolate = receiver->GetIsolate();
-  return *JSReceiver::GetIdentityHash(isolate, handle(receiver, isolate));
-}
-
-
-Object* Object::GetSimpleHash() {
+// Returns a non-SMI for JSObjects, but returns the hash code for simple
+// objects.  This avoids a double lookup in the cases where we know we will
+// add the hash to the JSObject if it does not already exist.
+Object* GetSimpleHash(Object* object) {
   // The object is either a Smi, a HeapNumber, a name, an odd-ball,
   // a SIMD value type, a real JS object, or a Harmony proxy.
-  if (IsSmi()) {
-    uint32_t hash = ComputeIntegerHash(Smi::cast(this)->value(), kZeroHashSeed);
+  if (object->IsSmi()) {
+    uint32_t hash =
+        ComputeIntegerHash(Smi::cast(object)->value(), kZeroHashSeed);
     return Smi::FromInt(hash & Smi::kMaxValue);
   }
-  if (IsHeapNumber()) {
-    double num = HeapNumber::cast(this)->value();
+  if (object->IsHeapNumber()) {
+    double num = HeapNumber::cast(object)->value();
     if (std::isnan(num)) return Smi::FromInt(Smi::kMaxValue);
     if (i::IsMinusZero(num)) num = 0;
     if (IsSmiDouble(num)) {
@@ -1539,30 +1703,43 @@
     uint32_t hash = ComputeLongHash(double_to_uint64(num));
     return Smi::FromInt(hash & Smi::kMaxValue);
   }
-  if (IsName()) {
-    uint32_t hash = Name::cast(this)->Hash();
+  if (object->IsName()) {
+    uint32_t hash = Name::cast(object)->Hash();
     return Smi::FromInt(hash);
   }
-  if (IsOddball()) {
-    uint32_t hash = Oddball::cast(this)->to_string()->Hash();
+  if (object->IsOddball()) {
+    uint32_t hash = Oddball::cast(object)->to_string()->Hash();
     return Smi::FromInt(hash);
   }
-  if (IsSimd128Value()) {
-    uint32_t hash = Simd128Value::cast(this)->Hash();
+  if (object->IsSimd128Value()) {
+    uint32_t hash = Simd128Value::cast(object)->Hash();
     return Smi::FromInt(hash & Smi::kMaxValue);
   }
-  DCHECK(IsJSReceiver());
-  JSReceiver* receiver = JSReceiver::cast(this);
-  return receiver->GetHeap()->undefined_value();
+  DCHECK(object->IsJSReceiver());
+  // Simply return the receiver as it is guaranteed to not be a SMI.
+  return object;
 }
 
+}  // namespace
 
-Handle<Smi> Object::GetOrCreateHash(Isolate* isolate, Handle<Object> object) {
-  Handle<Object> hash(object->GetSimpleHash(), isolate);
-  if (hash->IsSmi()) return Handle<Smi>::cast(hash);
+Object* Object::GetHash() {
+  Object* hash = GetSimpleHash(this);
+  if (hash->IsSmi()) return hash;
+
+  DisallowHeapAllocation no_gc;
+  DCHECK(IsJSReceiver());
+  JSReceiver* receiver = JSReceiver::cast(this);
+  Isolate* isolate = receiver->GetIsolate();
+  return JSReceiver::GetIdentityHash(isolate, handle(receiver, isolate));
+}
+
+Smi* Object::GetOrCreateHash(Isolate* isolate, Handle<Object> object) {
+  Object* hash = GetSimpleHash(*object);
+  if (hash->IsSmi()) return Smi::cast(hash);
 
   DCHECK(object->IsJSReceiver());
-  return JSReceiver::GetOrCreateIdentityHash(Handle<JSReceiver>::cast(object));
+  return JSReceiver::GetOrCreateIdentityHash(isolate,
+                                             Handle<JSReceiver>::cast(object));
 }
 
 
@@ -1644,9 +1821,6 @@
 MaybeHandle<Object> Object::ArraySpeciesConstructor(
     Isolate* isolate, Handle<Object> original_array) {
   Handle<Object> default_species = isolate->array_function();
-  if (!FLAG_harmony_species) {
-    return default_species;
-  }
   if (original_array->IsJSArray() &&
       Handle<JSArray>::cast(original_array)->HasArrayPrototype(isolate) &&
       isolate->IsArraySpeciesLookupChainIntact()) {
@@ -1678,12 +1852,12 @@
           JSReceiver::GetProperty(Handle<JSReceiver>::cast(constructor),
                                   isolate->factory()->species_symbol()),
           Object);
-      if (constructor->IsNull()) {
+      if (constructor->IsNull(isolate)) {
         constructor = isolate->factory()->undefined_value();
       }
     }
   }
-  if (constructor->IsUndefined()) {
+  if (constructor->IsUndefined(isolate)) {
     return default_species;
   } else {
     if (!constructor->IsConstructor()) {
@@ -1908,8 +2082,7 @@
   return true;
 }
 
-
-void String::StringShortPrint(StringStream* accumulator) {
+void String::StringShortPrint(StringStream* accumulator, bool show_details) {
   int len = length();
   if (len > kMaxShortPrintLength) {
     accumulator->Add("<Very long string[%u]>", len);
@@ -1938,15 +2111,15 @@
   }
   stream.Reset(this);
   if (one_byte) {
-    accumulator->Add("<String[%u]: ", length());
+    if (show_details) accumulator->Add("<String[%u]: ", length());
     for (int i = 0; i < len; i++) {
       accumulator->Put(static_cast<char>(stream.GetNext()));
     }
-    accumulator->Put('>');
+    if (show_details) accumulator->Put('>');
   } else {
     // Backslash indicates that the string contains control
     // characters and that backslashes are therefore escaped.
-    accumulator->Add("<String[%u]\\: ", length());
+    if (show_details) accumulator->Add("<String[%u]\\: ", length());
     for (int i = 0; i < len; i++) {
       uint16_t c = stream.GetNext();
       if (c == '\n') {
@@ -1966,7 +2139,7 @@
       accumulator->Put('.');
       accumulator->Put('.');
     }
-    accumulator->Put('>');
+    if (show_details) accumulator->Put('>');
   }
   return;
 }
@@ -1984,9 +2157,9 @@
 void JSObject::JSObjectShortPrint(StringStream* accumulator) {
   switch (map()->instance_type()) {
     case JS_ARRAY_TYPE: {
-      double length = JSArray::cast(this)->length()->IsUndefined()
-          ? 0
-          : JSArray::cast(this)->length()->Number();
+      double length = JSArray::cast(this)->length()->IsUndefined(GetIsolate())
+                          ? 0
+                          : JSArray::cast(this)->length()->Number();
       accumulator->Add("<JS Array[%u]>", static_cast<uint32_t>(length));
       break;
     }
@@ -2222,6 +2395,7 @@
 
 void HeapObject::HeapObjectShortPrint(std::ostream& os) {  // NOLINT
   Heap* heap = GetHeap();
+  Isolate* isolate = heap->isolate();
   if (!heap->Contains(this)) {
     os << "!!!INVALID POINTER!!!";
     return;
@@ -2307,15 +2481,15 @@
       break;
     }
     case ODDBALL_TYPE: {
-      if (IsUndefined()) {
+      if (IsUndefined(isolate)) {
         os << "<undefined>";
-      } else if (IsTheHole()) {
+      } else if (IsTheHole(isolate)) {
         os << "<the hole>";
-      } else if (IsNull()) {
+      } else if (IsNull(isolate)) {
         os << "<null>";
-      } else if (IsTrue()) {
+      } else if (IsTrue(isolate)) {
         os << "<true>";
-      } else if (IsFalse()) {
+      } else if (IsFalse(isolate)) {
         os << "<false>";
       } else {
         os << "<Odd Oddball: ";
@@ -2551,25 +2725,6 @@
 }
 
 
-MaybeHandle<String> JSReceiver::BuiltinStringTag(Handle<JSReceiver> object) {
-  Maybe<bool> is_array = Object::IsArray(object);
-  MAYBE_RETURN(is_array, MaybeHandle<String>());
-  Isolate* const isolate = object->GetIsolate();
-  if (is_array.FromJust()) {
-    return isolate->factory()->Array_string();
-  }
-  // TODO(adamk): According to ES2015, we should return "Function" when
-  // object has a [[Call]] internal method (corresponds to IsCallable).
-  // But this is well cemented in layout tests and might cause webbreakage.
-  // if (object->IsCallable()) {
-  //   return isolate->factory()->Function_string();
-  // }
-  // TODO(adamk): class_name() is expensive, replace with instance type
-  // checks where possible.
-  return handle(object->class_name(), isolate);
-}
-
-
 // static
 Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
   Isolate* isolate = receiver->GetIsolate();
@@ -2718,8 +2873,9 @@
     } else {
       auto cell = isolate->factory()->NewPropertyCell();
       cell->set_value(*value);
-      auto cell_type = value->IsUndefined() ? PropertyCellType::kUndefined
-                                            : PropertyCellType::kConstant;
+      auto cell_type = value->IsUndefined(isolate)
+                           ? PropertyCellType::kUndefined
+                           : PropertyCellType::kConstant;
       details = details.set_cell_type(cell_type);
       value = cell;
 
@@ -2802,7 +2958,6 @@
 void JSObject::UpdatePrototypeUserRegistration(Handle<Map> old_map,
                                                Handle<Map> new_map,
                                                Isolate* isolate) {
-  if (!FLAG_track_prototype_users) return;
   if (!old_map->is_prototype_map()) return;
   DCHECK(new_map->is_prototype_map());
   bool was_registered = JSObject::UnregisterPrototypeUser(old_map, isolate);
@@ -3199,7 +3354,7 @@
       // Ensure that no transition was inserted for prototype migrations.
       DCHECK_EQ(
           0, TransitionArray::NumberOfTransitions(old_map->raw_transitions()));
-      DCHECK(new_map->GetBackPointer()->IsUndefined());
+      DCHECK(new_map->GetBackPointer()->IsUndefined(new_map->GetIsolate()));
     }
   } else {
     MigrateFastToSlow(object, new_map, expected_additional_properties);
@@ -3311,17 +3466,18 @@
 // proper sharing of descriptor arrays.
 void Map::ReplaceDescriptors(DescriptorArray* new_descriptors,
                              LayoutDescriptor* new_layout_descriptor) {
+  Isolate* isolate = GetIsolate();
   // Don't overwrite the empty descriptor array or initial map's descriptors.
-  if (NumberOfOwnDescriptors() == 0 || GetBackPointer()->IsUndefined()) {
+  if (NumberOfOwnDescriptors() == 0 || GetBackPointer()->IsUndefined(isolate)) {
     return;
   }
 
   DescriptorArray* to_replace = instance_descriptors();
-  GetHeap()->incremental_marking()->IterateBlackObject(to_replace);
+  isolate->heap()->incremental_marking()->IterateBlackObject(to_replace);
   Map* current = this;
   while (current->instance_descriptors() == to_replace) {
     Object* next = current->GetBackPointer();
-    if (next->IsUndefined()) break;  // Stop overwriting at initial map.
+    if (next->IsUndefined(isolate)) break;  // Stop overwriting at initial map.
     current->SetEnumLength(kInvalidEnumCacheSentinel);
     current->UpdateDescriptors(new_descriptors, new_layout_descriptor);
     current = Map::cast(next);
@@ -3332,9 +3488,10 @@
 
 Map* Map::FindRootMap() {
   Map* result = this;
+  Isolate* isolate = GetIsolate();
   while (true) {
     Object* back = result->GetBackPointer();
-    if (back->IsUndefined()) {
+    if (back->IsUndefined(isolate)) {
       // Initial map always owns descriptors and doesn't have unused entries
       // in the descriptor array.
       DCHECK(result->owns_descriptors());
@@ -3392,9 +3549,10 @@
   DisallowHeapAllocation no_allocation;
   DCHECK_EQ(DATA, instance_descriptors()->GetDetails(descriptor).type());
   Map* result = this;
+  Isolate* isolate = GetIsolate();
   while (true) {
     Object* back = result->GetBackPointer();
-    if (back->IsUndefined()) break;
+    if (back->IsUndefined(isolate)) break;
     Map* parent = Map::cast(back);
     if (parent->NumberOfOwnDescriptors() <= descriptor) break;
     result = parent;
@@ -4200,56 +4358,14 @@
                              ALLOW_IN_DESCRIPTOR);
 }
 
-
 Maybe<bool> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
                                                  ShouldThrow should_throw,
                                                  Handle<Object> value) {
-  Isolate* isolate = it->isolate();
-  // Make sure that the top context does not change when doing callbacks or
-  // interceptor calls.
-  AssertNoContextChange ncc(isolate);
-
   DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
-  Handle<InterceptorInfo> interceptor(it->GetInterceptor());
-  if (interceptor->setter()->IsUndefined()) return Just(false);
-
-  Handle<JSObject> holder = it->GetHolder<JSObject>();
-  bool result;
-  Handle<Object> receiver = it->GetReceiver();
-  if (!receiver->IsJSReceiver()) {
-    ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
-                                     Object::ConvertReceiver(isolate, receiver),
-                                     Nothing<bool>());
-  }
-  PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
-                                 *holder, should_throw);
-
-  if (it->IsElement()) {
-    uint32_t index = it->index();
-    v8::IndexedPropertySetterCallback setter =
-        v8::ToCData<v8::IndexedPropertySetterCallback>(interceptor->setter());
-    // TODO(neis): In the future, we may want to actually return the
-    // interceptor's result, which then should be a boolean.
-    result = !args.Call(setter, index, value).is_null();
-  } else {
-    Handle<Name> name = it->name();
-    DCHECK(!name->IsPrivate());
-
-    if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
-      return Just(false);
-    }
-
-    v8::GenericNamedPropertySetterCallback setter =
-        v8::ToCData<v8::GenericNamedPropertySetterCallback>(
-            interceptor->setter());
-    result = !args.Call(setter, name, value).is_null();
-  }
-
-  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
-  return Just(result);
+  return SetPropertyWithInterceptorInternal(it, it->GetInterceptor(),
+                                            should_throw, value);
 }
 
-
 MaybeHandle<Object> Object::SetProperty(Handle<Object> object,
                                         Handle<Name> name, Handle<Object> value,
                                         LanguageMode language_mode,
@@ -4291,15 +4407,18 @@
                                     value, it->GetReceiver(), language_mode);
 
       case LookupIterator::INTERCEPTOR: {
-        Handle<Map> store_target_map =
-            handle(it->GetStoreTarget()->map(), it->isolate());
+        Handle<Map> store_target_map;
+        if (it->GetReceiver()->IsJSObject()) {
+          store_target_map = handle(it->GetStoreTarget()->map(), it->isolate());
+        }
         if (it->HolderIsReceiverOrHiddenPrototype()) {
           Maybe<bool> result =
               JSObject::SetPropertyWithInterceptor(it, should_throw, value);
           if (result.IsNothing() || result.FromJust()) return result;
           // Interceptor modified the store target but failed to set the
           // property.
-          Utils::ApiCheck(*store_target_map == it->GetStoreTarget()->map(),
+          Utils::ApiCheck(store_target_map.is_null() ||
+                              *store_target_map == it->GetStoreTarget()->map(),
                           it->IsElement() ? "v8::IndexedPropertySetterCallback"
                                           : "v8::NamedPropertySetterCallback",
                           "Interceptor silently changed store target.");
@@ -4312,7 +4431,8 @@
           }
           // Interceptor modified the store target but failed to set the
           // property.
-          Utils::ApiCheck(*store_target_map == it->GetStoreTarget()->map(),
+          Utils::ApiCheck(store_target_map.is_null() ||
+                              *store_target_map == it->GetStoreTarget()->map(),
                           it->IsElement() ? "v8::IndexedPropertySetterCallback"
                                           : "v8::NamedPropertySetterCallback",
                           "Interceptor silently changed store target.");
@@ -4537,7 +4657,7 @@
   Handle<Object> to_assign = value;
   // Convert the incoming value to a number for storing into typed arrays.
   if (it->IsElement() && receiver->HasFixedTypedArrayElements()) {
-    if (!value->IsNumber() && !value->IsUndefined()) {
+    if (!value->IsNumber() && !value->IsUndefined(it->isolate())) {
       ASSIGN_RETURN_ON_EXCEPTION_VALUE(
           it->isolate(), to_assign, Object::ToNumber(value), Nothing<bool>());
       // We have to recheck the length. However, it can only change if the
@@ -4676,13 +4796,14 @@
     new_descriptors->CopyEnumCacheFrom(*descriptors);
   }
 
+  Isolate* isolate = map->GetIsolate();
   // Replace descriptors by new_descriptors in all maps that share it.
-  map->GetHeap()->incremental_marking()->IterateBlackObject(*descriptors);
+  isolate->heap()->incremental_marking()->IterateBlackObject(*descriptors);
 
   Map* current = *map;
   while (current->instance_descriptors() == *descriptors) {
     Object* next = current->GetBackPointer();
-    if (next->IsUndefined()) break;  // Stop overwriting at initial map.
+    if (next->IsUndefined(isolate)) break;  // Stop overwriting at initial map.
     current->UpdateDescriptors(*new_descriptors, layout_descriptor);
     current = Map::cast(next);
   }
@@ -4942,7 +5063,7 @@
     }
   }
 
-  DCHECK(!map->IsUndefined());
+  DCHECK(!map->IsUndefined(isolate));
   // Check if we can go back in the elements kind transition chain.
   if (IsHoleyElementsKind(from_kind) &&
       to_kind == GetPackedElementsKind(from_kind) &&
@@ -5016,7 +5137,7 @@
                                        isolate->factory()->has_string()),
       Nothing<bool>());
   // 7. If trap is undefined, then
-  if (trap->IsUndefined()) {
+  if (trap->IsUndefined(isolate)) {
     // 7a. Return target.[[HasProperty]](P).
     return JSReceiver::HasProperty(target, name);
   }
@@ -5082,7 +5203,7 @@
   Handle<Object> trap;
   ASSIGN_RETURN_ON_EXCEPTION_VALUE(
       isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
-  if (trap->IsUndefined()) {
+  if (trap->IsUndefined(isolate)) {
     LookupIterator it =
         LookupIterator::PropertyOrElement(isolate, receiver, name, target);
     return Object::SetSuperProperty(&it, value, language_mode,
@@ -5118,7 +5239,7 @@
     }
     inconsistent = PropertyDescriptor::IsAccessorDescriptor(&target_desc) &&
                    !target_desc.configurable() &&
-                   target_desc.set()->IsUndefined();
+                   target_desc.set()->IsUndefined(isolate);
     if (inconsistent) {
       isolate->Throw(*isolate->factory()->NewTypeError(
           MessageTemplate::kProxySetFrozenAccessor, name));
@@ -5151,7 +5272,7 @@
   Handle<Object> trap;
   ASSIGN_RETURN_ON_EXCEPTION_VALUE(
       isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
-  if (trap->IsUndefined()) {
+  if (trap->IsUndefined(isolate)) {
     return JSReceiver::DeletePropertyOrElement(target, name, language_mode);
   }
 
@@ -5500,7 +5621,7 @@
 MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
     Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
     PropertyAttributes attributes) {
-  DCHECK(!value->IsTheHole());
+  DCHECK(!value->IsTheHole(object->GetIsolate()));
   LookupIterator it(object, name, object, LookupIterator::OWN);
   return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
 }
@@ -5522,73 +5643,11 @@
   return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
 }
 
-
 Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
     LookupIterator* it) {
-  Isolate* isolate = it->isolate();
-  // Make sure that the top context does not change when doing
-  // callbacks or interceptor calls.
-  AssertNoContextChange ncc(isolate);
-  HandleScope scope(isolate);
-
-  Handle<JSObject> holder = it->GetHolder<JSObject>();
-  Handle<InterceptorInfo> interceptor(it->GetInterceptor());
-  if (!it->IsElement() && it->name()->IsSymbol() &&
-      !interceptor->can_intercept_symbols()) {
-    return Just(ABSENT);
-  }
-  Handle<Object> receiver = it->GetReceiver();
-  if (!receiver->IsJSReceiver()) {
-    ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
-                                     Object::ConvertReceiver(isolate, receiver),
-                                     Nothing<PropertyAttributes>());
-  }
-  PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
-                                 *holder, Object::DONT_THROW);
-  if (!interceptor->query()->IsUndefined()) {
-    Handle<Object> result;
-    if (it->IsElement()) {
-      uint32_t index = it->index();
-      v8::IndexedPropertyQueryCallback query =
-          v8::ToCData<v8::IndexedPropertyQueryCallback>(interceptor->query());
-      result = args.Call(query, index);
-    } else {
-      Handle<Name> name = it->name();
-      DCHECK(!name->IsPrivate());
-      v8::GenericNamedPropertyQueryCallback query =
-          v8::ToCData<v8::GenericNamedPropertyQueryCallback>(
-              interceptor->query());
-      result = args.Call(query, name);
-    }
-    if (!result.is_null()) {
-      int32_t value;
-      CHECK(result->ToInt32(&value));
-      return Just(static_cast<PropertyAttributes>(value));
-    }
-  } else if (!interceptor->getter()->IsUndefined()) {
-    // TODO(verwaest): Use GetPropertyWithInterceptor?
-    Handle<Object> result;
-    if (it->IsElement()) {
-      uint32_t index = it->index();
-      v8::IndexedPropertyGetterCallback getter =
-          v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
-      result = args.Call(getter, index);
-    } else {
-      Handle<Name> name = it->name();
-      DCHECK(!name->IsPrivate());
-      v8::GenericNamedPropertyGetterCallback getter =
-          v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
-              interceptor->getter());
-      result = args.Call(getter, name);
-    }
-    if (!result.is_null()) return Just(DONT_ENUM);
-  }
-
-  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
-  return Just(ABSENT);
+  return GetPropertyAttributesWithInterceptorInternal(it, it->GetInterceptor());
 }
 
-
 Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
     LookupIterator* it) {
   for (; it->IsFound(); it->Next()) {
@@ -5703,7 +5762,7 @@
   // Compute the length of the instance descriptor.
   for (int i = 0; i < instance_descriptor_length; i++) {
     int index = Smi::cast(iteration_order->get(i))->value();
-    DCHECK(dictionary->IsKey(dictionary->KeyAt(index)));
+    DCHECK(dictionary->IsKey(isolate, dictionary->KeyAt(index)));
 
     Object* value = dictionary->ValueAt(index);
     PropertyType type = dictionary->DetailsAt(index).type();
@@ -5919,60 +5978,56 @@
   return Smi::FromInt(hash_value);
 }
 
+template <typename ProxyType>
+static Smi* GetOrCreateIdentityHashHelper(Isolate* isolate,
+                                          Handle<ProxyType> proxy) {
+  Object* maybe_hash = proxy->hash();
+  if (maybe_hash->IsSmi()) return Smi::cast(maybe_hash);
 
-template<typename ProxyType>
-static Handle<Smi> GetOrCreateIdentityHashHelper(Handle<ProxyType> proxy) {
-  Isolate* isolate = proxy->GetIsolate();
-
-  Handle<Object> maybe_hash(proxy->hash(), isolate);
-  if (maybe_hash->IsSmi()) return Handle<Smi>::cast(maybe_hash);
-
-  Handle<Smi> hash(GenerateIdentityHash(isolate), isolate);
-  proxy->set_hash(*hash);
+  Smi* hash = GenerateIdentityHash(isolate);
+  proxy->set_hash(hash);
   return hash;
 }
 
 // static
-Handle<Object> JSObject::GetIdentityHash(Isolate* isolate,
-                                         Handle<JSObject> object) {
+Object* JSObject::GetIdentityHash(Isolate* isolate, Handle<JSObject> object) {
   if (object->IsJSGlobalProxy()) {
-    return handle(JSGlobalProxy::cast(*object)->hash(), isolate);
+    return JSGlobalProxy::cast(*object)->hash();
   }
   Handle<Name> hash_code_symbol = isolate->factory()->hash_code_symbol();
-  return JSReceiver::GetDataProperty(object, hash_code_symbol);
+  return *JSReceiver::GetDataProperty(object, hash_code_symbol);
 }
 
 // static
-Handle<Smi> JSObject::GetOrCreateIdentityHash(Handle<JSObject> object) {
+Smi* JSObject::GetOrCreateIdentityHash(Isolate* isolate,
+                                       Handle<JSObject> object) {
   if (object->IsJSGlobalProxy()) {
-    return GetOrCreateIdentityHashHelper(Handle<JSGlobalProxy>::cast(object));
+    return GetOrCreateIdentityHashHelper(isolate,
+                                         Handle<JSGlobalProxy>::cast(object));
   }
-  Isolate* isolate = object->GetIsolate();
 
   Handle<Name> hash_code_symbol = isolate->factory()->hash_code_symbol();
   LookupIterator it(object, hash_code_symbol, object, LookupIterator::OWN);
   if (it.IsFound()) {
     DCHECK_EQ(LookupIterator::DATA, it.state());
-    Handle<Object> maybe_hash = it.GetDataValue();
-    if (maybe_hash->IsSmi()) return Handle<Smi>::cast(maybe_hash);
+    Object* maybe_hash = *it.GetDataValue();
+    if (maybe_hash->IsSmi()) return Smi::cast(maybe_hash);
   }
 
-  Handle<Smi> hash(GenerateIdentityHash(isolate), isolate);
-  CHECK(AddDataProperty(&it, hash, NONE, THROW_ON_ERROR,
+  Smi* hash = GenerateIdentityHash(isolate);
+  CHECK(AddDataProperty(&it, handle(hash, isolate), NONE, THROW_ON_ERROR,
                         CERTAINLY_NOT_STORE_FROM_KEYED)
             .IsJust());
   return hash;
 }
 
 // static
-Handle<Object> JSProxy::GetIdentityHash(Isolate* isolate,
-                                        Handle<JSProxy> proxy) {
-  return handle(proxy->hash(), isolate);
+Object* JSProxy::GetIdentityHash(Handle<JSProxy> proxy) {
+  return proxy->hash();
 }
 
-
-Handle<Smi> JSProxy::GetOrCreateIdentityHash(Handle<JSProxy> proxy) {
-  return GetOrCreateIdentityHashHelper(proxy);
+Smi* JSProxy::GetOrCreateIdentityHash(Isolate* isolate, Handle<JSProxy> proxy) {
+  return GetOrCreateIdentityHashHelper(isolate, proxy);
 }
 
 
@@ -5985,7 +6040,7 @@
 
   DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
   Handle<InterceptorInfo> interceptor(it->GetInterceptor());
-  if (interceptor->deleter()->IsUndefined()) return Nothing<bool>();
+  if (interceptor->deleter()->IsUndefined(isolate)) return Nothing<bool>();
 
   Handle<JSObject> holder = it->GetHolder<JSObject>();
   Handle<Object> receiver = it->GetReceiver();
@@ -6019,7 +6074,7 @@
 
   DCHECK(result->IsBoolean());
   // Rebox CustomArguments::kReturnValueOffset before returning.
-  return Just(result->IsTrue());
+  return Just(result->IsTrue(isolate));
 }
 
 
@@ -6230,7 +6285,8 @@
   // 5. ReturnIfAbrupt(keys).
   Handle<FixedArray> keys;
   ASSIGN_RETURN_ON_EXCEPTION(
-      isolate, keys, JSReceiver::GetKeys(props, OWN_ONLY, ALL_PROPERTIES),
+      isolate, keys, KeyAccumulator::GetKeys(props, KeyCollectionMode::kOwnOnly,
+                                             ALL_PROPERTIES),
       Object);
   // 6. Let descriptors be an empty List.
   int capacity = keys->length();
@@ -6921,7 +6977,7 @@
       Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name),
       Nothing<bool>());
   // 7. If trap is undefined, then:
-  if (trap->IsUndefined()) {
+  if (trap->IsUndefined(isolate)) {
     // 7a. Return target.[[DefineOwnProperty]](P, Desc).
     return JSReceiver::DefineOwnProperty(isolate, target, key, desc,
                                          should_throw);
@@ -7139,7 +7195,7 @@
       Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name),
       Nothing<bool>());
   // 7. If trap is undefined, then
-  if (trap->IsUndefined()) {
+  if (trap->IsUndefined(isolate)) {
     // 7a. Return target.[[GetOwnProperty]](P).
     return JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, desc);
   }
@@ -7152,7 +7208,8 @@
       Nothing<bool>());
   // 9. If Type(trapResultObj) is neither Object nor Undefined, throw a
   //    TypeError exception.
-  if (!trap_result_obj->IsJSReceiver() && !trap_result_obj->IsUndefined()) {
+  if (!trap_result_obj->IsJSReceiver() &&
+      !trap_result_obj->IsUndefined(isolate)) {
     isolate->Throw(*isolate->factory()->NewTypeError(
         MessageTemplate::kProxyGetOwnPropertyDescriptorInvalid, name));
     return Nothing<bool>();
@@ -7163,7 +7220,7 @@
       JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, &target_desc);
   MAYBE_RETURN(found, Nothing<bool>());
   // 11. If trapResultObj is undefined, then
-  if (trap_result_obj->IsUndefined()) {
+  if (trap_result_obj->IsUndefined(isolate)) {
     // 11a. If targetDesc is undefined, return undefined.
     if (!found.FromJust()) return Just(false);
     // 11b. If targetDesc.[[Configurable]] is false, throw a TypeError
@@ -7228,19 +7285,20 @@
 bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
                                             ElementsKind kind,
                                             Object* object) {
+  Isolate* isolate = elements->GetIsolate();
   if (IsFastObjectElementsKind(kind) || kind == FAST_STRING_WRAPPER_ELEMENTS) {
     int length = IsJSArray()
         ? Smi::cast(JSArray::cast(this)->length())->value()
         : elements->length();
     for (int i = 0; i < length; ++i) {
       Object* element = elements->get(i);
-      if (!element->IsTheHole() && element == object) return true;
+      if (!element->IsTheHole(isolate) && element == object) return true;
     }
   } else {
     DCHECK(kind == DICTIONARY_ELEMENTS || kind == SLOW_STRING_WRAPPER_ELEMENTS);
     Object* key =
         SeededNumberDictionary::cast(elements)->SlowReverseLookup(object);
-    if (!key->IsUndefined()) return true;
+    if (!key->IsUndefined(isolate)) return true;
   }
   return false;
 }
@@ -7264,7 +7322,7 @@
 
   // Check if the object is among the named properties.
   Object* key = SlowReverseLookup(obj);
-  if (!key->IsUndefined()) {
+  if (!key->IsUndefined(heap->isolate())) {
     return true;
   }
 
@@ -7302,7 +7360,7 @@
       int length = parameter_map->length();
       for (int i = 2; i < length; ++i) {
         Object* value = parameter_map->get(i);
-        if (!value->IsTheHole() && value == obj) return true;
+        if (!value->IsTheHole(heap->isolate()) && value == obj) return true;
       }
       // Check the arguments.
       FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
@@ -7489,7 +7547,7 @@
   Handle<Object> trap;
   ASSIGN_RETURN_ON_EXCEPTION_VALUE(
       isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
-  if (trap->IsUndefined()) {
+  if (trap->IsUndefined(isolate)) {
     return JSReceiver::PreventExtensions(target, should_throw);
   }
 
@@ -7591,7 +7649,7 @@
   Handle<Object> trap;
   ASSIGN_RETURN_ON_EXCEPTION_VALUE(
       isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
-  if (trap->IsUndefined()) {
+  if (trap->IsUndefined(isolate)) {
     return JSReceiver::IsExtensible(target);
   }
 
@@ -7635,9 +7693,10 @@
 static void ApplyAttributesToDictionary(Dictionary* dictionary,
                                         const PropertyAttributes attributes) {
   int capacity = dictionary->Capacity();
+  Isolate* isolate = dictionary->GetIsolate();
   for (int i = 0; i < capacity; i++) {
     Object* k = dictionary->KeyAt(i);
-    if (dictionary->IsKey(k) &&
+    if (dictionary->IsKey(isolate, k) &&
         !(k->IsSymbol() && Symbol::cast(k)->is_private())) {
       PropertyDetails details = dictionary->DetailsAt(i);
       int attrs = attributes;
@@ -7908,9 +7967,8 @@
       // an array.
       PropertyFilter filter = static_cast<PropertyFilter>(
           ONLY_WRITABLE | ONLY_ENUMERABLE | ONLY_CONFIGURABLE);
-      KeyAccumulator accumulator(isolate, OWN_ONLY, filter);
-      accumulator.NextPrototype();
-      accumulator.CollectOwnPropertyNames(copy);
+      KeyAccumulator accumulator(isolate, KeyCollectionMode::kOwnOnly, filter);
+      accumulator.CollectOwnPropertyNames(copy, copy);
       Handle<FixedArray> names = accumulator.GetKeys();
       for (int i = 0; i < names->length(); i++) {
         DCHECK(names->get(i)->IsName());
@@ -7965,7 +8023,7 @@
         int capacity = element_dictionary->Capacity();
         for (int i = 0; i < capacity; i++) {
           Object* k = element_dictionary->KeyAt(i);
-          if (element_dictionary->IsKey(k)) {
+          if (element_dictionary->IsKey(isolate, k)) {
             Handle<Object> value(element_dictionary->ValueAt(i), isolate);
             if (value->IsJSObject()) {
               Handle<JSObject> result;
@@ -8044,7 +8102,7 @@
   ASSIGN_RETURN_ON_EXCEPTION(
       isolate, exotic_to_prim,
       GetMethod(receiver, isolate->factory()->to_primitive_symbol()), Object);
-  if (!exotic_to_prim->IsUndefined()) {
+  if (!exotic_to_prim->IsUndefined(isolate)) {
     Handle<Object> hint_string;
     switch (hint) {
       case ToPrimitiveHint::kDefault:
@@ -8215,15 +8273,6 @@
          !has_hidden_prototype() && !is_dictionary_map();
 }
 
-MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
-                                            KeyCollectionType type,
-                                            PropertyFilter filter,
-                                            GetKeysConversion keys_conversion,
-                                            bool filter_proxy_keys) {
-  return KeyAccumulator::GetKeys(object, type, filter, keys_conversion,
-                                 filter_proxy_keys);
-}
-
 MUST_USE_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
     Isolate* isolate, Handle<JSReceiver> receiver, bool get_entries,
     Handle<FixedArray>* result) {
@@ -8314,10 +8363,13 @@
 
   PropertyFilter key_filter =
       static_cast<PropertyFilter>(filter & ~ONLY_ENUMERABLE);
-  KeyAccumulator accumulator(isolate, OWN_ONLY, key_filter);
-  MAYBE_RETURN(accumulator.CollectKeys(object, object),
-               MaybeHandle<FixedArray>());
-  Handle<FixedArray> keys = accumulator.GetKeys(CONVERT_TO_STRING);
+
+  Handle<FixedArray> keys;
+  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+      isolate, keys,
+      KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly, key_filter,
+                              GetKeysConversion::kConvertToString),
+      MaybeHandle<FixedArray>());
 
   values_or_entries = isolate->factory()->NewFixedArray(keys->length());
   int length = 0;
@@ -8430,10 +8482,10 @@
     return it->factory()->undefined_value();
   }
 
-  DCHECK(getter->IsCallable() || getter->IsUndefined() || getter->IsNull() ||
-         getter->IsFunctionTemplateInfo());
-  DCHECK(setter->IsCallable() || setter->IsUndefined() || setter->IsNull() ||
-         getter->IsFunctionTemplateInfo());
+  DCHECK(getter->IsCallable() || getter->IsUndefined(isolate) ||
+         getter->IsNull(isolate) || getter->IsFunctionTemplateInfo());
+  DCHECK(setter->IsCallable() || setter->IsUndefined(isolate) ||
+         setter->IsNull(isolate) || getter->IsFunctionTemplateInfo());
   it->TransitionToAccessorProperty(getter, setter, attributes);
 
   return isolate->factory()->undefined_value();
@@ -8551,7 +8603,8 @@
   Isolate* isolate = fast_map->GetIsolate();
   Handle<Object> maybe_cache(isolate->native_context()->normalized_map_cache(),
                              isolate);
-  bool use_cache = !fast_map->is_prototype_map() && !maybe_cache->IsUndefined();
+  bool use_cache =
+      !fast_map->is_prototype_map() && !maybe_cache->IsUndefined(isolate);
   Handle<NormalizedMapCache> cache;
   if (use_cache) cache = Handle<NormalizedMapCache>::cast(maybe_cache);
 
@@ -8762,7 +8815,7 @@
 
 void Map::ConnectTransition(Handle<Map> parent, Handle<Map> child,
                             Handle<Name> name, SimpleTransitionFlag flag) {
-  if (!parent->GetBackPointer()->IsUndefined()) {
+  if (!parent->GetBackPointer()->IsUndefined(parent->GetIsolate())) {
     parent->set_owns_descriptors(false);
   } else {
     // |parent| is initial map and it must keep the ownership, there must be no
@@ -9253,7 +9306,7 @@
           : &RuntimeCallStats::Map_TransitionToAccessorProperty);
 
   // At least one of the accessors needs to be a new value.
-  DCHECK(!getter->IsNull() || !setter->IsNull());
+  DCHECK(!getter->IsNull(isolate) || !setter->IsNull(isolate));
   DCHECK(name->IsUniqueName());
 
   // Dictionary maps can always have additional data properties.
@@ -9314,11 +9367,13 @@
     if (current_pair->Equals(*getter, *setter)) return map;
 
     bool overwriting_accessor = false;
-    if (!getter->IsNull() && !current_pair->get(ACCESSOR_GETTER)->IsNull() &&
+    if (!getter->IsNull(isolate) &&
+        !current_pair->get(ACCESSOR_GETTER)->IsNull(isolate) &&
         current_pair->get(ACCESSOR_GETTER) != *getter) {
       overwriting_accessor = true;
     }
-    if (!setter->IsNull() && !current_pair->get(ACCESSOR_SETTER)->IsNull() &&
+    if (!setter->IsNull(isolate) &&
+        !current_pair->get(ACCESSOR_SETTER)->IsNull(isolate) &&
         current_pair->get(ACCESSOR_SETTER) != *setter) {
       overwriting_accessor = true;
     }
@@ -9349,7 +9404,7 @@
 
   // Share descriptors only if map owns descriptors and it not an initial map.
   if (flag == INSERT_TRANSITION && map->owns_descriptors() &&
-      !map->GetBackPointer()->IsUndefined() &&
+      !map->GetBackPointer()->IsUndefined(map->GetIsolate()) &&
       TransitionArray::CanHaveMoreTransitions(map)) {
     return ShareDescriptor(map, descriptors, descriptor);
   }
@@ -9479,29 +9534,186 @@
                                 simple_flag);
 }
 
+// Helper class to manage a Map's code cache. The layout depends on the number
+// of entries; this is worthwhile because most code caches are very small,
+// but some are huge (thousands of entries).
+// For zero entries, the EmptyFixedArray is used.
+// For one entry, we use a 2-element FixedArray containing [name, code].
+// For 2..100 entries, we use a FixedArray with linear lookups, the layout is:
+//   [0] - number of slots that are currently in use
+//   [1] - first name
+//   [2] - first code
+//   [3] - second name
+//   [4] - second code
+//   etc.
+// For more than 128 entries, we use a CodeCacheHashTable.
+class CodeCache : public AllStatic {
+ public:
+  // Returns the new cache, to be stored on the map.
+  static Handle<FixedArray> Put(Isolate* isolate, Handle<FixedArray> cache,
+                                Handle<Name> name, Handle<Code> code) {
+    int length = cache->length();
+    if (length == 0) return PutFirstElement(isolate, name, code);
+    if (length == kEntrySize) {
+      return PutSecondElement(isolate, cache, name, code);
+    }
+    if (length <= kLinearMaxSize) {
+      Handle<FixedArray> result = PutLinearElement(isolate, cache, name, code);
+      if (!result.is_null()) return result;
+      // Fall through if linear storage is getting too large.
+    }
+    return PutHashTableElement(isolate, cache, name, code);
+  }
+
+  static Code* Lookup(FixedArray* cache, Name* name, Code::Flags flags) {
+    int length = cache->length();
+    if (length == 0) return nullptr;
+    if (length == kEntrySize) return OneElementLookup(cache, name, flags);
+    if (!cache->IsCodeCacheHashTable()) {
+      return LinearLookup(cache, name, flags);
+    } else {
+      return CodeCacheHashTable::cast(cache)->Lookup(name, flags);
+    }
+  }
+
+ private:
+  static const int kNameIndex = 0;
+  static const int kCodeIndex = 1;
+  static const int kEntrySize = 2;
+
+  static const int kLinearUsageIndex = 0;
+  static const int kLinearReservedSlots = 1;
+  static const int kLinearInitialCapacity = 2;
+  static const int kLinearMaxSize = 257;  // == LinearSizeFor(128);
+
+  static const int kHashTableInitialCapacity = 200;  // Number of entries.
+
+  static int LinearSizeFor(int entries) {
+    return kLinearReservedSlots + kEntrySize * entries;
+  }
+
+  static int LinearNewSize(int old_size) {
+    int old_entries = (old_size - kLinearReservedSlots) / kEntrySize;
+    return LinearSizeFor(old_entries * 2);
+  }
+
+  static Code* OneElementLookup(FixedArray* cache, Name* name,
+                                Code::Flags flags) {
+    DCHECK_EQ(cache->length(), kEntrySize);
+    if (cache->get(kNameIndex) != name) return nullptr;
+    Code* maybe_code = Code::cast(cache->get(kCodeIndex));
+    if (maybe_code->flags() != flags) return nullptr;
+    return maybe_code;
+  }
+
+  static Code* LinearLookup(FixedArray* cache, Name* name, Code::Flags flags) {
+    DCHECK_GE(cache->length(), kEntrySize);
+    DCHECK(!cache->IsCodeCacheHashTable());
+    int usage = GetLinearUsage(cache);
+    for (int i = kLinearReservedSlots; i < usage; i += kEntrySize) {
+      if (cache->get(i + kNameIndex) != name) continue;
+      Code* code = Code::cast(cache->get(i + kCodeIndex));
+      if (code->flags() == flags) return code;
+    }
+    return nullptr;
+  }
+
+  static Handle<FixedArray> PutFirstElement(Isolate* isolate, Handle<Name> name,
+                                            Handle<Code> code) {
+    Handle<FixedArray> cache = isolate->factory()->NewFixedArray(kEntrySize);
+    cache->set(kNameIndex, *name);
+    cache->set(kCodeIndex, *code);
+    return cache;
+  }
+
+  static Handle<FixedArray> PutSecondElement(Isolate* isolate,
+                                             Handle<FixedArray> cache,
+                                             Handle<Name> name,
+                                             Handle<Code> code) {
+    DCHECK_EQ(cache->length(), kEntrySize);
+    Handle<FixedArray> new_cache = isolate->factory()->NewFixedArray(
+        LinearSizeFor(kLinearInitialCapacity));
+    new_cache->set(kLinearReservedSlots + kNameIndex, cache->get(kNameIndex));
+    new_cache->set(kLinearReservedSlots + kCodeIndex, cache->get(kCodeIndex));
+    new_cache->set(LinearSizeFor(1) + kNameIndex, *name);
+    new_cache->set(LinearSizeFor(1) + kCodeIndex, *code);
+    new_cache->set(kLinearUsageIndex, Smi::FromInt(LinearSizeFor(2)));
+    return new_cache;
+  }
+
+  static Handle<FixedArray> PutLinearElement(Isolate* isolate,
+                                             Handle<FixedArray> cache,
+                                             Handle<Name> name,
+                                             Handle<Code> code) {
+    int length = cache->length();
+    int usage = GetLinearUsage(*cache);
+    DCHECK_LE(usage, length);
+    // Check if we need to grow.
+    if (usage == length) {
+      int new_length = LinearNewSize(length);
+      if (new_length > kLinearMaxSize) return Handle<FixedArray>::null();
+      Handle<FixedArray> new_cache =
+          isolate->factory()->NewFixedArray(new_length);
+      for (int i = kLinearReservedSlots; i < length; i++) {
+        new_cache->set(i, cache->get(i));
+      }
+      cache = new_cache;
+    }
+    // Store new entry.
+    DCHECK_GE(cache->length(), usage + kEntrySize);
+    cache->set(usage + kNameIndex, *name);
+    cache->set(usage + kCodeIndex, *code);
+    cache->set(kLinearUsageIndex, Smi::FromInt(usage + kEntrySize));
+    return cache;
+  }
+
+  static Handle<FixedArray> PutHashTableElement(Isolate* isolate,
+                                                Handle<FixedArray> cache,
+                                                Handle<Name> name,
+                                                Handle<Code> code) {
+    // Check if we need to transition from linear to hash table storage.
+    if (!cache->IsCodeCacheHashTable()) {
+      // Check that the initial hash table capacity is large enough.
+      DCHECK_EQ(kLinearMaxSize, LinearSizeFor(128));
+      STATIC_ASSERT(kHashTableInitialCapacity > 128);
+
+      int length = cache->length();
+      // Only migrate from linear storage when it's full.
+      DCHECK_EQ(length, GetLinearUsage(*cache));
+      DCHECK_EQ(length, kLinearMaxSize);
+      Handle<CodeCacheHashTable> table =
+          CodeCacheHashTable::New(isolate, kHashTableInitialCapacity);
+      HandleScope scope(isolate);
+      for (int i = kLinearReservedSlots; i < length; i += kEntrySize) {
+        Handle<Name> old_name(Name::cast(cache->get(i + kNameIndex)), isolate);
+        Handle<Code> old_code(Code::cast(cache->get(i + kCodeIndex)), isolate);
+        CodeCacheHashTable::Put(table, old_name, old_code);
+      }
+      cache = table;
+    }
+    // Store new entry.
+    DCHECK(cache->IsCodeCacheHashTable());
+    return CodeCacheHashTable::Put(Handle<CodeCacheHashTable>::cast(cache),
+                                   name, code);
+  }
+
+  static inline int GetLinearUsage(FixedArray* linear_cache) {
+    DCHECK_GT(linear_cache->length(), kEntrySize);
+    return Smi::cast(linear_cache->get(kLinearUsageIndex))->value();
+  }
+};
 
 void Map::UpdateCodeCache(Handle<Map> map,
                           Handle<Name> name,
                           Handle<Code> code) {
   Isolate* isolate = map->GetIsolate();
-  HandleScope scope(isolate);
-  // Allocate the code cache if not present.
-  if (!map->has_code_cache()) {
-    Handle<Object> result =
-        CodeCacheHashTable::New(isolate, CodeCacheHashTable::kInitialSize);
-    map->set_code_cache(*result);
-  }
-
-  // Update the code cache.
-  Handle<CodeCacheHashTable> cache(CodeCacheHashTable::cast(map->code_cache()),
-                                   isolate);
-  Handle<Object> new_cache = CodeCacheHashTable::Put(cache, name, code);
+  Handle<FixedArray> cache(map->code_cache(), isolate);
+  Handle<FixedArray> new_cache = CodeCache::Put(isolate, cache, name, code);
   map->set_code_cache(*new_cache);
 }
 
 Code* Map::LookupInCodeCache(Name* name, Code::Flags flags) {
-  if (!has_code_cache()) return nullptr;
-  return CodeCacheHashTable::cast(code_cache())->Lookup(name, flags);
+  return CodeCache::Lookup(code_cache(), name, flags);
 }
 
 
@@ -9937,7 +10149,7 @@
         .ToHandleChecked();
   }
   Isolate* isolate = accessor_pair->GetIsolate();
-  if (accessor->IsNull()) {
+  if (accessor->IsNull(isolate)) {
     return isolate->factory()->undefined_value();
   }
   return handle(accessor, isolate);
@@ -9965,12 +10177,21 @@
   return Handle<DeoptimizationOutputData>::cast(result);
 }
 
+const int LiteralsArray::kFeedbackVectorOffset =
+    LiteralsArray::OffsetOfElementAt(LiteralsArray::kVectorIndex);
+
+const int LiteralsArray::kOffsetToFirstLiteral =
+    LiteralsArray::OffsetOfElementAt(LiteralsArray::kFirstLiteralIndex);
 
 // static
 Handle<LiteralsArray> LiteralsArray::New(Isolate* isolate,
                                          Handle<TypeFeedbackVector> vector,
                                          int number_of_literals,
                                          PretenureFlag pretenure) {
+  if (vector->is_empty() && number_of_literals == 0) {
+    return Handle<LiteralsArray>::cast(
+        isolate->factory()->empty_literals_array());
+  }
   Handle<FixedArray> literals = isolate->factory()->NewFixedArray(
       number_of_literals + kFirstLiteralIndex, pretenure);
   Handle<LiteralsArray> casted_literals = Handle<LiteralsArray>::cast(literals);
@@ -10038,6 +10259,34 @@
 }
 #endif
 
+// static
+Handle<String> String::Trim(Handle<String> string, TrimMode mode) {
+  Isolate* const isolate = string->GetIsolate();
+  string = String::Flatten(string);
+  int const length = string->length();
+
+  // Perform left trimming if requested.
+  int left = 0;
+  UnicodeCache* unicode_cache = isolate->unicode_cache();
+  if (mode == kTrim || mode == kTrimLeft) {
+    while (left < length &&
+           unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(left))) {
+      left++;
+    }
+  }
+
+  // Perform right trimming if requested.
+  int right = length;
+  if (mode == kTrim || mode == kTrimRight) {
+    while (
+        right > left &&
+        unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(right - 1))) {
+      right--;
+    }
+  }
+
+  return isolate->factory()->NewSubString(string, left, right);
+}
 
 bool String::LooksValid() {
   if (!GetIsolate()->heap()->Contains(this)) return false;
@@ -10051,7 +10300,9 @@
   // ES6 section 9.2.11 SetFunctionName, step 4.
   Isolate* const isolate = name->GetIsolate();
   Handle<Object> description(Handle<Symbol>::cast(name)->name(), isolate);
-  if (description->IsUndefined()) return isolate->factory()->empty_string();
+  if (description->IsUndefined(isolate)) {
+    return isolate->factory()->empty_string();
+  }
   IncrementalStringBuilder builder(isolate);
   builder.AppendCharacter('[');
   builder.AppendString(Handle<String>::cast(description));
@@ -10059,6 +10310,19 @@
   return builder.Finish();
 }
 
+// static
+MaybeHandle<String> Name::ToFunctionName(Handle<Name> name,
+                                         Handle<String> prefix) {
+  Handle<String> name_string;
+  Isolate* const isolate = name->GetIsolate();
+  ASSIGN_RETURN_ON_EXCEPTION(isolate, name_string, ToFunctionName(name),
+                             String);
+  IncrementalStringBuilder builder(isolate);
+  builder.AppendString(prefix);
+  builder.AppendCharacter(' ');
+  builder.AppendString(name_string);
+  return builder.Finish();
+}
 
 namespace {
 
@@ -11134,8 +11398,8 @@
   value |= length << String::ArrayIndexLengthBits::kShift;
 
   DCHECK((value & String::kIsNotArrayIndexMask) == 0);
-  DCHECK((length > String::kMaxCachedArrayIndexLength) ||
-         (value & String::kContainsCachedArrayIndexMask) == 0);
+  DCHECK_EQ(length <= String::kMaxCachedArrayIndexLength,
+            (value & String::kContainsCachedArrayIndexMask) == 0);
   return value;
 }
 
@@ -11351,6 +11615,30 @@
   // No write barrier required, since the builtin is part of the root set.
 }
 
+// static
+Handle<LiteralsArray> SharedFunctionInfo::FindOrCreateLiterals(
+    Handle<SharedFunctionInfo> shared, Handle<Context> native_context) {
+  Isolate* isolate = shared->GetIsolate();
+  CodeAndLiterals result =
+      shared->SearchOptimizedCodeMap(*native_context, BailoutId::None());
+  if (result.literals != nullptr) {
+    DCHECK(shared->feedback_metadata()->is_empty() ||
+           !result.literals->feedback_vector()->is_empty());
+    return handle(result.literals, isolate);
+  }
+
+  Handle<TypeFeedbackVector> feedback_vector =
+      TypeFeedbackVector::New(isolate, handle(shared->feedback_metadata()));
+  Handle<LiteralsArray> literals = LiteralsArray::New(
+      isolate, feedback_vector, shared->num_literals(), TENURED);
+  Handle<Code> code;
+  if (result.code != nullptr) {
+    code = Handle<Code>(result.code, isolate);
+  }
+  AddToOptimizedCodeMap(shared, native_context, code, literals,
+                        BailoutId::None());
+  return literals;
+}
 
 void SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(
     Handle<SharedFunctionInfo> shared, Handle<Code> code) {
@@ -11397,9 +11685,13 @@
             isolate->factory()->NewWeakCell(code.ToHandleChecked());
         old_code_map->set(entry + kCachedCodeOffset, *code_cell);
       }
-      Handle<WeakCell> literals_cell =
-          isolate->factory()->NewWeakCell(literals);
-      old_code_map->set(entry + kLiteralsOffset, *literals_cell);
+      if (literals->literals_count() == 0) {
+        old_code_map->set(entry + kLiteralsOffset, *literals);
+      } else {
+        Handle<WeakCell> literals_cell =
+            isolate->factory()->NewWeakCell(literals);
+        old_code_map->set(entry + kLiteralsOffset, *literals_cell);
+      }
       return;
     }
 
@@ -11430,12 +11722,18 @@
   Handle<WeakCell> code_cell =
       code.is_null() ? isolate->factory()->empty_weak_cell()
                      : isolate->factory()->NewWeakCell(code.ToHandleChecked());
-  Handle<WeakCell> literals_cell = isolate->factory()->NewWeakCell(literals);
   WeakCell* context_cell = native_context->self_weak_cell();
 
   new_code_map->set(entry + kContextOffset, context_cell);
   new_code_map->set(entry + kCachedCodeOffset, *code_cell);
-  new_code_map->set(entry + kLiteralsOffset, *literals_cell);
+
+  if (literals->literals_count() == 0) {
+    new_code_map->set(entry + kLiteralsOffset, *literals);
+  } else {
+    Handle<WeakCell> literals_cell = isolate->factory()->NewWeakCell(literals);
+    new_code_map->set(entry + kLiteralsOffset, *literals_cell);
+  }
+
   new_code_map->set(entry + kOsrAstIdOffset, Smi::FromInt(osr_ast_id.ToInt()));
 
 #ifdef DEBUG
@@ -11446,8 +11744,16 @@
     DCHECK(cell->cleared() ||
            (cell->value()->IsCode() &&
             Code::cast(cell->value())->kind() == Code::OPTIMIZED_FUNCTION));
-    cell = WeakCell::cast(new_code_map->get(i + kLiteralsOffset));
-    DCHECK(cell->cleared() || cell->value()->IsFixedArray());
+    Object* lits = new_code_map->get(i + kLiteralsOffset);
+    if (lits->IsWeakCell()) {
+      cell = WeakCell::cast(lits);
+      DCHECK(cell->cleared() ||
+             (cell->value()->IsLiteralsArray() &&
+              LiteralsArray::cast(cell->value())->literals_count() > 0));
+    } else {
+      DCHECK(lits->IsLiteralsArray() &&
+             LiteralsArray::cast(lits)->literals_count() == 0);
+    }
     DCHECK(new_code_map->get(i + kOsrAstIdOffset)->IsSmi());
   }
 #endif
@@ -11547,6 +11853,17 @@
   }
 }
 
+// static
+void JSFunction::EnsureLiterals(Handle<JSFunction> function) {
+  Handle<SharedFunctionInfo> shared(function->shared());
+  Handle<Context> native_context(function->context()->native_context());
+  if (function->literals() ==
+      function->GetIsolate()->heap()->empty_literals_array()) {
+    Handle<LiteralsArray> literals =
+        SharedFunctionInfo::FindOrCreateLiterals(shared, native_context);
+    function->set_literals(*literals);
+  }
+}
 
 static void GetMinInobjectSlack(Map* map, void* data) {
   int slack = map->unused_property_fields();
@@ -11573,7 +11890,7 @@
 
 void Map::CompleteInobjectSlackTracking() {
   // Has to be an initial map.
-  DCHECK(GetBackPointer()->IsUndefined());
+  DCHECK(GetBackPointer()->IsUndefined(GetIsolate()));
 
   int slack = unused_property_fields();
   TransitionArray::TraverseTransitionTree(this, &GetMinInobjectSlack, &slack);
@@ -11604,6 +11921,26 @@
   return false;
 }
 
+// static
+void JSObject::MakePrototypesFast(Handle<Object> receiver,
+                                  WhereToStart where_to_start,
+                                  Isolate* isolate) {
+  if (!receiver->IsJSReceiver()) return;
+  for (PrototypeIterator iter(isolate, Handle<JSReceiver>::cast(receiver),
+                              where_to_start);
+       !iter.IsAtEnd(); iter.Advance()) {
+    Handle<Object> current = PrototypeIterator::GetCurrent(iter);
+    if (!current->IsJSObject()) return;
+    Handle<JSObject> current_obj = Handle<JSObject>::cast(current);
+    Map* current_map = current_obj->map();
+    if (current_map->is_prototype_map() &&
+        !current_map->should_be_fast_prototype_map()) {
+      Handle<Map> map(current_map);
+      Map::SetShouldBeFastPrototypeMap(map, true, isolate);
+      JSObject::OptimizeAsPrototype(current_obj, FAST_PROTOTYPE);
+    }
+  }
+}
 
 // static
 void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
@@ -11615,10 +11952,12 @@
                                   "NormalizeAsPrototype");
   }
   Handle<Map> previous_map(object->map());
-  if (!object->HasFastProperties()) {
-    JSObject::MigrateSlowToFast(object, 0, "OptimizeAsPrototype");
-  }
-  if (!object->map()->is_prototype_map()) {
+  if (object->map()->is_prototype_map()) {
+    if (object->map()->should_be_fast_prototype_map() &&
+        !object->HasFastProperties()) {
+      JSObject::MigrateSlowToFast(object, 0, "OptimizeAsPrototype");
+    }
+  } else {
     if (object->map() == *previous_map) {
       Handle<Map> new_map = Map::Copy(handle(object->map()), "CopyAsPrototype");
       JSObject::MigrateToMap(object, new_map);
@@ -11646,13 +11985,13 @@
 // static
 void JSObject::ReoptimizeIfPrototype(Handle<JSObject> object) {
   if (!object->map()->is_prototype_map()) return;
+  if (!object->map()->should_be_fast_prototype_map()) return;
   OptimizeAsPrototype(object, FAST_PROTOTYPE);
 }
 
 
 // static
 void JSObject::LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
-  DCHECK(FLAG_track_prototype_users);
   // Contract: In line with InvalidatePrototypeChains()'s requirements,
   // leaf maps don't need to register as users, only prototypes do.
   DCHECK(user->is_prototype_map());
@@ -11758,7 +12097,6 @@
 
 // static
 void JSObject::InvalidatePrototypeChains(Map* map) {
-  if (!FLAG_eliminate_prototype_chain_checks) return;
   DisallowHeapAllocation no_gc;
   InvalidatePrototypeChainsInternal(map);
 }
@@ -11789,6 +12127,15 @@
   return proto_info;
 }
 
+// static
+void Map::SetShouldBeFastPrototypeMap(Handle<Map> map, bool value,
+                                      Isolate* isolate) {
+  if (value == false && !map->prototype_info()->IsPrototypeInfo()) {
+    // "False" is the implicit default value, so there's nothing to do.
+    return;
+  }
+  GetOrCreatePrototypeInfo(map, isolate)->set_should_be_fast_map(value);
+}
 
 // static
 Handle<Cell> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
@@ -11839,8 +12186,9 @@
   }
   map->set_has_hidden_prototype(is_hidden);
 
-  WriteBarrierMode wb_mode =
-      prototype->IsNull() ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+  WriteBarrierMode wb_mode = prototype->IsNull(map->GetIsolate())
+                                 ? SKIP_WRITE_BARRIER
+                                 : UPDATE_WRITE_BARRIER;
   map->set_prototype(*prototype, wb_mode);
 }
 
@@ -12022,6 +12370,8 @@
     case JS_MESSAGE_OBJECT_TYPE:
     case JS_MODULE_TYPE:
     case JS_OBJECT_TYPE:
+    case JS_ERROR_TYPE:
+    case JS_ARGUMENTS_TYPE:
     case JS_PROMISE_TYPE:
     case JS_REGEXP_TYPE:
     case JS_SET_ITERATOR_TYPE:
@@ -12387,7 +12737,7 @@
     // Due to laziness, the position may not have been translated from code
     // offset yet, which would be encoded as negative integer. In that case,
     // translate and set the position.
-    if (eval_from_shared()->IsUndefined()) {
+    if (eval_from_shared()->IsUndefined(GetIsolate())) {
       position = 0;
     } else {
       SharedFunctionInfo* shared = SharedFunctionInfo::cast(eval_from_shared());
@@ -12400,12 +12750,11 @@
 }
 
 void Script::InitLineEnds(Handle<Script> script) {
-  if (!script->line_ends()->IsUndefined()) return;
-
   Isolate* isolate = script->GetIsolate();
+  if (!script->line_ends()->IsUndefined(isolate)) return;
 
   if (!script->source()->IsString()) {
-    DCHECK(script->source()->IsUndefined());
+    DCHECK(script->source()->IsUndefined(isolate));
     Handle<FixedArray> empty = isolate->factory()->NewFixedArray(0);
     script->set_line_ends(*empty);
     DCHECK(script->line_ends()->IsFixedArray());
@@ -12424,42 +12773,93 @@
   DCHECK(script->line_ends()->IsFixedArray());
 }
 
-
-int Script::GetColumnNumber(Handle<Script> script, int code_pos) {
-  int line_number = GetLineNumber(script, code_pos);
-  if (line_number == -1) return -1;
+#define SMI_VALUE(x) (Smi::cast(x)->value())
+bool Script::GetPositionInfo(int position, PositionInfo* info,
+                             OffsetFlag offset_flag) {
+  Handle<Script> script(this);
+  InitLineEnds(script);
 
   DisallowHeapAllocation no_allocation;
-  FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
-  line_number = line_number - script->line_offset();
-  if (line_number == 0) return code_pos + script->column_offset();
-  int prev_line_end_pos =
-      Smi::cast(line_ends_array->get(line_number - 1))->value();
-  return code_pos - (prev_line_end_pos + 1);
-}
 
+  DCHECK(script->line_ends()->IsFixedArray());
+  FixedArray* ends = FixedArray::cast(script->line_ends());
 
-int Script::GetLineNumberWithArray(int code_pos) {
-  DisallowHeapAllocation no_allocation;
-  DCHECK(line_ends()->IsFixedArray());
-  FixedArray* line_ends_array = FixedArray::cast(line_ends());
-  int line_ends_len = line_ends_array->length();
-  if (line_ends_len == 0) return -1;
+  const int ends_len = ends->length();
+  if (ends_len == 0) return false;
 
-  if ((Smi::cast(line_ends_array->get(0)))->value() >= code_pos) {
-    return line_offset();
+  // Return early on invalid positions. Negative positions behave as if 0 was
+  // passed, and positions beyond the end of the script return as failure.
+  if (position < 0) {
+    position = 0;
+  } else if (position > SMI_VALUE(ends->get(ends_len - 1))) {
+    return false;
   }
 
-  int left = 0;
-  int right = line_ends_len;
-  while (int half = (right - left) / 2) {
-    if ((Smi::cast(line_ends_array->get(left + half)))->value() > code_pos) {
-      right -= half;
-    } else {
-      left += half;
+  // Determine line number by doing a binary search on the line ends array.
+  if (SMI_VALUE(ends->get(0)) >= position) {
+    info->line = 0;
+    info->line_start = 0;
+    info->column = position;
+  } else {
+    int left = 0;
+    int right = ends_len - 1;
+
+    while (right > 0) {
+      DCHECK_LE(left, right);
+      const int mid = (left + right) / 2;
+      if (position > SMI_VALUE(ends->get(mid))) {
+        left = mid + 1;
+      } else if (position <= SMI_VALUE(ends->get(mid - 1))) {
+        right = mid - 1;
+      } else {
+        info->line = mid;
+        break;
+      }
+    }
+    DCHECK(SMI_VALUE(ends->get(info->line)) >= position &&
+           SMI_VALUE(ends->get(info->line - 1)) < position);
+    info->line_start = SMI_VALUE(ends->get(info->line - 1)) + 1;
+    info->column = position - info->line_start;
+  }
+
+  // Line end is position of the linebreak character.
+  info->line_end = SMI_VALUE(ends->get(info->line));
+  if (info->line_end > 0) {
+    DCHECK(script->source()->IsString());
+    Handle<String> src(String::cast(script->source()));
+    if (src->Get(info->line_end - 1) == '\r') {
+      info->line_end--;
     }
   }
-  return right + line_offset();
+
+  // Add offsets if requested.
+  if (offset_flag == WITH_OFFSET) {
+    if (info->line == 0) {
+      info->column += script->column_offset();
+    }
+    info->line += script->line_offset();
+  }
+
+  return true;
+}
+#undef SMI_VALUE
+
+int Script::GetColumnNumber(Handle<Script> script, int code_pos) {
+  PositionInfo info;
+  if (!script->GetPositionInfo(code_pos, &info, WITH_OFFSET)) {
+    return -1;
+  }
+
+  return info.column;
+}
+
+int Script::GetLineNumberWithArray(int code_pos) {
+  PositionInfo info;
+  if (!GetPositionInfo(code_pos, &info, WITH_OFFSET)) {
+    return -1;
+  }
+
+  return info.line;
 }
 
 
@@ -12471,7 +12871,9 @@
 
 int Script::GetLineNumber(int code_pos) {
   DisallowHeapAllocation no_allocation;
-  if (!line_ends()->IsUndefined()) return GetLineNumberWithArray(code_pos);
+  if (!line_ends()->IsUndefined(GetIsolate())) {
+    return GetLineNumberWithArray(code_pos);
+  }
 
   // Slow mode: we do not have line_ends. We have to iterate through source.
   if (!source()->IsString()) return -1;
@@ -12510,7 +12912,7 @@
 
 Handle<JSObject> Script::GetWrapper(Handle<Script> script) {
   Isolate* isolate = script->GetIsolate();
-  if (!script->wrapper()->IsUndefined()) {
+  if (!script->wrapper()->IsUndefined(isolate)) {
     DCHECK(script->wrapper()->IsWeakCell());
     Handle<WeakCell> cell(WeakCell::cast(script->wrapper()));
     if (!cell->cleared()) {
@@ -12675,8 +13077,9 @@
 }
 
 bool SharedFunctionInfo::HasSourceCode() const {
-  return !script()->IsUndefined() &&
-         !reinterpret_cast<Script*>(script())->source()->IsUndefined();
+  Isolate* isolate = GetIsolate();
+  return !script()->IsUndefined(isolate) &&
+         !reinterpret_cast<Script*>(script())->source()->IsUndefined(isolate);
 }
 
 
@@ -12732,9 +13135,8 @@
     int* instance_size, int* in_object_properties) {
   Isolate* isolate = GetIsolate();
   int expected_nof_properties = 0;
-  for (PrototypeIterator iter(isolate, this,
-                              PrototypeIterator::START_AT_RECEIVER);
-       !iter.IsAtEnd(); iter.Advance()) {
+  for (PrototypeIterator iter(isolate, this, kStartAtReceiver); !iter.IsAtEnd();
+       iter.Advance()) {
     JSReceiver* current = iter.GetCurrent<JSReceiver>();
     if (!current->IsJSFunction()) break;
     JSFunction* func = JSFunction::cast(current);
@@ -12883,13 +13285,7 @@
   shared_info->set_language_mode(lit->language_mode());
   shared_info->set_uses_arguments(lit->scope()->arguments() != NULL);
   shared_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
-  shared_info->set_ast_node_count(lit->ast_node_count());
   shared_info->set_is_function(lit->is_function());
-  if (lit->dont_optimize_reason() != kNoReason) {
-    shared_info->DisableOptimization(lit->dont_optimize_reason());
-  }
-  shared_info->set_dont_crankshaft(lit->flags() &
-                                   AstProperties::kDontCrankshaft);
   shared_info->set_never_compiled(true);
   shared_info->set_kind(lit->kind());
   if (!IsConstructable(lit->kind(), lit->language_mode())) {
@@ -12928,9 +13324,6 @@
 
 void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
   code()->ClearInlineCaches();
-  // If we clear ICs, we need to clear the type feedback vector too, since
-  // CallICs are synced with a feedback vector slot.
-  ClearTypeFeedbackInfo();
   set_ic_age(new_ic_age);
   if (code()->kind() == Code::FUNCTION) {
     code()->set_profiler_ticks(0);
@@ -12940,7 +13333,7 @@
     }
     set_opt_count(0);
     set_deopt_count(0);
-  } else if (code()->is_interpreter_entry_trampoline()) {
+  } else if (code()->is_interpreter_trampoline_builtin()) {
     set_profiler_ticks(0);
     if (optimization_disabled() && opt_count() >= FLAG_max_opt_count) {
       // Re-enable optimizations if they were disabled due to opt_count limit.
@@ -12976,6 +13369,19 @@
   return -1;
 }
 
+void SharedFunctionInfo::ClearCodeFromOptimizedCodeMap() {
+  if (!OptimizedCodeMapIsCleared()) {
+    FixedArray* optimized_code_map = this->optimized_code_map();
+    int length = optimized_code_map->length();
+    WeakCell* empty_weak_cell = GetHeap()->empty_weak_cell();
+    for (int i = kEntriesStart; i < length; i += kEntryLength) {
+      optimized_code_map->set(i + kCachedCodeOffset, empty_weak_cell,
+                              SKIP_WRITE_BARRIER);
+    }
+    optimized_code_map->set(kSharedCodeIndex, empty_weak_cell,
+                            SKIP_WRITE_BARRIER);
+  }
+}
 
 CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
     Context* native_context, BailoutId osr_ast_id) {
@@ -12993,13 +13399,18 @@
     } else {
       DCHECK_LE(entry + kEntryLength, code_map->length());
       WeakCell* cell = WeakCell::cast(code_map->get(entry + kCachedCodeOffset));
-      WeakCell* literals_cell =
-          WeakCell::cast(code_map->get(entry + kLiteralsOffset));
-
+      Object* lits = code_map->get(entry + kLiteralsOffset);
+      LiteralsArray* literals = nullptr;
+      if (lits->IsWeakCell()) {
+        WeakCell* literal_cell = WeakCell::cast(lits);
+        if (!literal_cell->cleared()) {
+          literals = LiteralsArray::cast(literal_cell->value());
+        }
+      } else {
+        literals = LiteralsArray::cast(lits);
+      }
       result = {cell->cleared() ? nullptr : Code::cast(cell->value()),
-                literals_cell->cleared()
-                    ? nullptr
-                    : LiteralsArray::cast(literals_cell->value())};
+                literals};
     }
   }
   return result;
@@ -13024,63 +13435,66 @@
 
 void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
   DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
-  Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
-  Object* old_target = target;
-  VisitPointer(&target);
-  CHECK_EQ(target, old_target);  // VisitPointer doesn't change Code* *target.
+  Object* old_pointer = Code::GetCodeFromTargetAddress(rinfo->target_address());
+  Object* new_pointer = old_pointer;
+  VisitPointer(&new_pointer);
+  DCHECK_EQ(old_pointer, new_pointer);
 }
 
 
 void ObjectVisitor::VisitCodeAgeSequence(RelocInfo* rinfo) {
   DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
-  Object* stub = rinfo->code_age_stub();
-  if (stub) {
-    VisitPointer(&stub);
+  Object* old_pointer = rinfo->code_age_stub();
+  Object* new_pointer = old_pointer;
+  if (old_pointer != nullptr) {
+    VisitPointer(&new_pointer);
+    DCHECK_EQ(old_pointer, new_pointer);
   }
 }
 
 
 void ObjectVisitor::VisitCodeEntry(Address entry_address) {
-  Object* code = Code::GetObjectFromEntryAddress(entry_address);
-  Object* old_code = code;
-  VisitPointer(&code);
-  if (code != old_code) {
-    Memory::Address_at(entry_address) = reinterpret_cast<Code*>(code)->entry();
-  }
+  Object* old_pointer = Code::GetObjectFromEntryAddress(entry_address);
+  Object* new_pointer = old_pointer;
+  VisitPointer(&new_pointer);
+  DCHECK_EQ(old_pointer, new_pointer);
 }
 
 
 void ObjectVisitor::VisitCell(RelocInfo* rinfo) {
   DCHECK(rinfo->rmode() == RelocInfo::CELL);
-  Object* cell = rinfo->target_cell();
-  Object* old_cell = cell;
-  VisitPointer(&cell);
-  if (cell != old_cell) {
-    rinfo->set_target_cell(reinterpret_cast<Cell*>(cell));
-  }
+  Object* old_pointer = rinfo->target_cell();
+  Object* new_pointer = old_pointer;
+  VisitPointer(&new_pointer);
+  DCHECK_EQ(old_pointer, new_pointer);
 }
 
 
 void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
   DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
          rinfo->IsPatchedDebugBreakSlotSequence());
-  Object* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
-  Object* old_target = target;
-  VisitPointer(&target);
-  CHECK_EQ(target, old_target);  // VisitPointer doesn't change Code* *target.
+  Object* old_pointer =
+      Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
+  Object* new_pointer = old_pointer;
+  VisitPointer(&new_pointer);
+  DCHECK_EQ(old_pointer, new_pointer);
 }
 
 
 void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
   DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
-  Object* p = rinfo->target_object();
-  VisitPointer(&p);
+  Object* old_pointer = rinfo->target_object();
+  Object* new_pointer = old_pointer;
+  VisitPointer(&new_pointer);
+  DCHECK_EQ(old_pointer, new_pointer);
 }
 
 
 void ObjectVisitor::VisitExternalReference(RelocInfo* rinfo) {
-  Address p = rinfo->target_external_reference();
-  VisitExternalReference(&p);
+  Address old_reference = rinfo->target_external_reference();
+  Address new_reference = old_reference;
+  VisitExternalReference(&new_reference);
+  DCHECK_EQ(old_reference, new_reference);
 }
 
 
@@ -13119,6 +13533,14 @@
   CopyBytes(instruction_start(), desc.buffer,
             static_cast<size_t>(desc.instr_size));
 
+  // copy unwinding info, if any
+  if (desc.unwinding_info) {
+    DCHECK_GT(desc.unwinding_info_size, 0);
+    set_unwinding_info_size(desc.unwinding_info_size);
+    CopyBytes(unwinding_info_start(), desc.unwinding_info,
+              static_cast<size_t>(desc.unwinding_info_size));
+  }
+
   // copy reloc info
   CopyBytes(relocation_start(),
             desc.buffer + desc.buffer_size - desc.reloc_size,
@@ -13171,31 +13593,14 @@
 // The position returned is relative to the beginning of the script where the
 // source for this function is found.
 int Code::SourcePosition(int code_offset) {
-  Address pc = instruction_start() + code_offset;
-  int distance = kMaxInt;
+  // Subtract one because the current PC is one instruction after the call site.
+  Address pc = instruction_start() + code_offset - 1;
   int position = RelocInfo::kNoPosition;  // Initially no position found.
-  // Run through all the relocation info to find the best matching source
-  // position. All the code needs to be considered as the sequence of the
-  // instructions in the code does not necessarily follow the same order as the
-  // source.
-  RelocIterator it(this, RelocInfo::kPositionMask);
-  while (!it.done()) {
-    // Only look at positions after the current pc.
-    if (it.rinfo()->pc() < pc) {
-      // Get position and distance.
-
-      int dist = static_cast<int>(pc - it.rinfo()->pc());
-      int pos = static_cast<int>(it.rinfo()->data());
-      // If this position is closer than the current candidate or if it has the
-      // same distance as the current candidate and the position is higher then
-      // this position is the new candidate.
-      if ((dist < distance) ||
-          (dist == distance && pos > position)) {
-        position = pos;
-        distance = dist;
-      }
-    }
-    it.next();
+  // Find the closest position attached to a pc lower or equal to the current.
+  // Note that the pc of reloc infos grow monotonically.
+  for (RelocIterator it(this, RelocInfo::kPositionMask);
+       !it.done() && it.rinfo()->pc() <= pc; it.next()) {
+    position = static_cast<int>(it.rinfo()->data());
   }
   DCHECK(kind() == FUNCTION || (is_optimized_code() && is_turbofanned()) ||
          is_wasm_code() || position == RelocInfo::kNoPosition);
@@ -13206,20 +13611,18 @@
 // Same as Code::SourcePosition above except it only looks for statement
 // positions.
 int Code::SourceStatementPosition(int code_offset) {
-  // First find the position as close as possible using all position
-  // information.
+  // First find the closest position.
   int position = SourcePosition(code_offset);
   // Now find the closest statement position before the position.
   int statement_position = 0;
-  RelocIterator it(this, RelocInfo::kPositionMask);
-  while (!it.done()) {
+  for (RelocIterator it(this, RelocInfo::kPositionMask); !it.done();
+       it.next()) {
     if (RelocInfo::IsStatementPosition(it.rinfo()->rmode())) {
       int p = static_cast<int>(it.rinfo()->data());
       if (statement_position < p && p <= position) {
         statement_position = p;
       }
     }
-    it.next();
   }
   return statement_position;
 }
@@ -13307,16 +13710,14 @@
                            : GetCode()->SourceStatementPosition(offset);
 }
 
-void SharedFunctionInfo::ClearTypeFeedbackInfo() {
-  feedback_vector()->ClearSlots(this);
+void JSFunction::ClearTypeFeedbackInfo() {
+  feedback_vector()->ClearSlots(shared());
 }
 
-
-void SharedFunctionInfo::ClearTypeFeedbackInfoAtGCTime() {
-  feedback_vector()->ClearSlotsAtGCTime(this);
+void JSFunction::ClearTypeFeedbackInfoAtGCTime() {
+  feedback_vector()->ClearSlotsAtGCTime(shared());
 }
 
-
 BailoutId Code::TranslatePcOffsetToAstId(uint32_t pc_offset) {
   DisallowHeapAllocation no_gc;
   DCHECK(kind() == FUNCTION);
@@ -13552,6 +13953,14 @@
   return NULL;
 }
 
+// Identify kind of code.
+const char* AbstractCode::Kind2String(Kind kind) {
+  if (kind < AbstractCode::INTERPRETED_FUNCTION)
+    return Code::Kind2String((Code::Kind)kind);
+  if (kind == AbstractCode::INTERPRETED_FUNCTION) return "INTERPRETED_FUNCTION";
+  UNREACHABLE();
+  return NULL;
+}
 
 Handle<WeakCell> Code::WeakCellFor(Handle<Code> code) {
   DCHECK(code->kind() == OPTIMIZED_FUNCTION);
@@ -13575,7 +13984,6 @@
   return NULL;
 }
 
-
 #ifdef ENABLE_DISASSEMBLER
 
 void DeoptimizationInputData::DeoptimizationInputDataPrint(
@@ -13709,9 +14117,20 @@
           break;
         }
 
+        case Translation::FLOAT_REGISTER: {
+          int reg_code = iterator.Next();
+          os << "{input="
+             << RegisterConfiguration::Crankshaft()->GetFloatRegisterName(
+                    reg_code)
+             << "}";
+          break;
+        }
+
         case Translation::DOUBLE_REGISTER: {
           int reg_code = iterator.Next();
-          os << "{input=" << DoubleRegister::from_code(reg_code).ToString()
+          os << "{input="
+             << RegisterConfiguration::Crankshaft()->GetDoubleRegisterName(
+                    reg_code)
              << "}";
           break;
         }
@@ -13740,6 +14159,7 @@
           break;
         }
 
+        case Translation::FLOAT_STACK_SLOT:
         case Translation::DOUBLE_STACK_SLOT: {
           int input_slot_index = iterator.Next();
           os << "{input=" << input_slot_index << "}";
@@ -13830,7 +14250,6 @@
     case POLYMORPHIC: return "POLYMORPHIC";
     case MEGAMORPHIC: return "MEGAMORPHIC";
     case GENERIC: return "GENERIC";
-    case DEBUG_STUB: return "DEBUG_STUB";
   }
   UNREACHABLE();
   return NULL;
@@ -13856,7 +14275,10 @@
     os << "major_key = " << (n == NULL ? "null" : n) << "\n";
   }
   if (is_inline_cache_stub()) {
-    os << "ic_state = " << ICState2String(ic_state()) << "\n";
+    if (!IC::ICUseVector(kind())) {
+      InlineCacheState ic_state = IC::StateFromCode(this);
+      os << "ic_state = " << ICState2String(ic_state) << "\n";
+    }
     PrintExtraICState(os, kind(), extra_ic_state());
     if (is_compare_ic_stub()) {
       DCHECK(CodeStub::GetMajorKey(this) == CodeStub::CompareIC);
@@ -13976,7 +14398,7 @@
       os << "\n";
     }
 #ifdef OBJECT_PRINT
-    if (!type_feedback_info()->IsUndefined()) {
+    if (!type_feedback_info()->IsUndefined(GetIsolate())) {
       TypeFeedbackInfo::cast(type_feedback_info())->TypeFeedbackInfoPrint(os);
       os << "\n";
     }
@@ -14013,20 +14435,18 @@
 }
 
 int BytecodeArray::SourceStatementPosition(int offset) {
-  // First find the position as close as possible using all position
-  // information.
+  // First find the closest position.
   int position = SourcePosition(offset);
   // Now find the closest statement position before the position.
   int statement_position = 0;
-  interpreter::SourcePositionTableIterator iterator(source_position_table());
-  while (!iterator.done()) {
-    if (iterator.is_statement()) {
-      int p = iterator.source_position();
+  for (interpreter::SourcePositionTableIterator it(source_position_table());
+       !it.done(); it.Advance()) {
+    if (it.is_statement()) {
+      int p = it.source_position();
       if (statement_position < p && p <= position) {
         statement_position = p;
       }
     }
-    iterator.Advance();
   }
   return statement_position;
 }
@@ -14429,7 +14849,7 @@
   STACK_CHECK(isolate, Nothing<bool>());
   Handle<Name> trap_name = isolate->factory()->setPrototypeOf_string();
   // 1. Assert: Either Type(V) is Object or Type(V) is Null.
-  DCHECK(value->IsJSReceiver() || value->IsNull());
+  DCHECK(value->IsJSReceiver() || value->IsNull(isolate));
   // 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
   Handle<Object> handler(proxy->handler(), isolate);
   // 3. If handler is null, throw a TypeError exception.
@@ -14448,7 +14868,7 @@
       Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name),
       Nothing<bool>());
   // 7. If trap is undefined, then return target.[[SetPrototypeOf]]().
-  if (trap->IsUndefined()) {
+  if (trap->IsUndefined(isolate)) {
     return JSReceiver::SetPrototype(target, value, from_javascript,
                                     should_throw);
   }
@@ -14516,7 +14936,7 @@
   Heap* heap = isolate->heap();
   // Silently ignore the change if value is not a JSObject or null.
   // SpiderMonkey behaves this way.
-  if (!value->IsJSReceiver() && !value->IsNull()) return Just(true);
+  if (!value->IsJSReceiver() && !value->IsNull(isolate)) return Just(true);
 
   bool dictionary_elements_in_chain =
       object->map()->DictionaryElementsInPrototypeChainOnly();
@@ -14526,8 +14946,7 @@
   if (from_javascript) {
     // Find the first object in the chain whose prototype object is not
     // hidden.
-    PrototypeIterator iter(isolate, real_receiver,
-                           PrototypeIterator::START_AT_PROTOTYPE,
+    PrototypeIterator iter(isolate, real_receiver, kStartAtPrototype,
                            PrototypeIterator::END_AT_NON_HIDDEN);
     while (!iter.IsAtEnd()) {
       // Casting to JSObject is fine because hidden prototypes are never
@@ -14560,7 +14979,7 @@
   // new prototype chain.
   if (value->IsJSReceiver()) {
     for (PrototypeIterator iter(isolate, JSReceiver::cast(*value),
-                                PrototypeIterator::START_AT_RECEIVER);
+                                kStartAtReceiver);
          !iter.IsAtEnd(); iter.Advance()) {
       if (iter.GetCurrent<JSReceiver>() == *object) {
         // Cycle detected.
@@ -15048,10 +15467,11 @@
 #ifdef OBJECT_PRINT
 template <typename Derived, typename Shape, typename Key>
 void Dictionary<Derived, Shape, Key>::Print(std::ostream& os) {  // NOLINT
+  Isolate* isolate = this->GetIsolate();
   int capacity = this->Capacity();
   for (int i = 0; i < capacity; i++) {
     Object* k = this->KeyAt(i);
-    if (this->IsKey(k)) {
+    if (this->IsKey(isolate, k)) {
       os << "\n   ";
       if (k->IsString()) {
         String::cast(k)->StringPrint(os);
@@ -15062,18 +15482,24 @@
     }
   }
 }
+template <typename Derived, typename Shape, typename Key>
+void Dictionary<Derived, Shape, Key>::Print() {
+  OFStream os(stdout);
+  Print(os);
+}
 #endif
 
 
 template<typename Derived, typename Shape, typename Key>
 void Dictionary<Derived, Shape, Key>::CopyValuesTo(FixedArray* elements) {
+  Isolate* isolate = this->GetIsolate();
   int pos = 0;
   int capacity = this->Capacity();
   DisallowHeapAllocation no_gc;
   WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
   for (int i = 0; i < capacity; i++) {
     Object* k = this->KeyAt(i);
-    if (this->IsKey(k)) {
+    if (this->IsKey(isolate, k)) {
       elements->set(pos++, this->ValueAt(i), mode);
     }
   }
@@ -15083,55 +15509,10 @@
 
 MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(LookupIterator* it,
                                                          bool* done) {
-  *done = false;
-  Isolate* isolate = it->isolate();
-  // Make sure that the top context does not change when doing callbacks or
-  // interceptor calls.
-  AssertNoContextChange ncc(isolate);
-
   DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
-  Handle<InterceptorInfo> interceptor = it->GetInterceptor();
-  if (interceptor->getter()->IsUndefined()) {
-    return isolate->factory()->undefined_value();
-  }
-
-  Handle<JSObject> holder = it->GetHolder<JSObject>();
-  Handle<Object> result;
-  Handle<Object> receiver = it->GetReceiver();
-  if (!receiver->IsJSReceiver()) {
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate, receiver, Object::ConvertReceiver(isolate, receiver), Object);
-  }
-  PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
-                                 *holder, Object::DONT_THROW);
-
-  if (it->IsElement()) {
-    uint32_t index = it->index();
-    v8::IndexedPropertyGetterCallback getter =
-        v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
-    result = args.Call(getter, index);
-  } else {
-    Handle<Name> name = it->name();
-    DCHECK(!name->IsPrivate());
-
-    if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
-      return isolate->factory()->undefined_value();
-    }
-
-    v8::GenericNamedPropertyGetterCallback getter =
-        v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
-            interceptor->getter());
-    result = args.Call(getter, name);
-  }
-
-  RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
-  if (result.is_null()) return isolate->factory()->undefined_value();
-  *done = true;
-  // Rebox handle before return
-  return handle(*result, isolate);
+  return GetPropertyWithInterceptorInternal(it, it->GetInterceptor(), done);
 }
 
-
 Maybe<bool> JSObject::HasRealNamedProperty(Handle<JSObject> object,
                                            Handle<Name> name) {
   LookupIterator it = LookupIterator::PropertyOrElement(
@@ -15292,12 +15673,25 @@
 
 MaybeHandle<String> Object::ObjectProtoToString(Isolate* isolate,
                                                 Handle<Object> object) {
-  if (object->IsUndefined()) return isolate->factory()->undefined_to_string();
-  if (object->IsNull()) return isolate->factory()->null_to_string();
+  if (*object == isolate->heap()->undefined_value()) {
+    return isolate->factory()->undefined_to_string();
+  }
+  if (*object == isolate->heap()->null_value()) {
+    return isolate->factory()->null_to_string();
+  }
 
   Handle<JSReceiver> receiver =
       Object::ToObject(isolate, object).ToHandleChecked();
 
+  // For proxies, we must check IsArray() before get(toStringTag) to comply
+  // with the specification
+  Maybe<bool> is_array = Nothing<bool>();
+  InstanceType instance_type = receiver->map()->instance_type();
+  if (instance_type == JS_PROXY_TYPE) {
+    is_array = Object::IsArray(receiver);
+    MAYBE_RETURN(is_array, MaybeHandle<String>());
+  }
+
   Handle<String> tag;
   Handle<Object> to_string_tag;
   ASSIGN_RETURN_ON_EXCEPTION(
@@ -15307,11 +15701,55 @@
       String);
   if (to_string_tag->IsString()) {
     tag = Handle<String>::cast(to_string_tag);
-  }
-
-  if (tag.is_null()) {
-    ASSIGN_RETURN_ON_EXCEPTION(isolate, tag,
-                               JSReceiver::BuiltinStringTag(receiver), String);
+  } else {
+    switch (instance_type) {
+      case JS_API_OBJECT_TYPE:
+      case JS_SPECIAL_API_OBJECT_TYPE:
+        tag = handle(receiver->class_name(), isolate);
+        break;
+      case JS_ARGUMENTS_TYPE:
+        return isolate->factory()->arguments_to_string();
+      case JS_ARRAY_TYPE:
+        return isolate->factory()->array_to_string();
+      case JS_BOUND_FUNCTION_TYPE:
+      case JS_FUNCTION_TYPE:
+        return isolate->factory()->function_to_string();
+      case JS_ERROR_TYPE:
+        return isolate->factory()->error_to_string();
+      case JS_DATE_TYPE:
+        return isolate->factory()->date_to_string();
+      case JS_REGEXP_TYPE:
+        return isolate->factory()->regexp_to_string();
+      case JS_PROXY_TYPE: {
+        if (is_array.FromJust()) {
+          return isolate->factory()->array_to_string();
+        }
+        if (receiver->IsCallable()) {
+          return isolate->factory()->function_to_string();
+        }
+        return isolate->factory()->object_to_string();
+      }
+      case JS_VALUE_TYPE: {
+        Object* value = JSValue::cast(*receiver)->value();
+        if (value->IsString()) {
+          return isolate->factory()->string_to_string();
+        }
+        if (value->IsNumber()) {
+          return isolate->factory()->number_to_string();
+        }
+        if (value->IsBoolean()) {
+          return isolate->factory()->boolean_to_string();
+        }
+        if (value->IsSymbol()) {
+          return isolate->factory()->object_to_string();
+        }
+        UNREACHABLE();
+        tag = handle(receiver->class_name(), isolate);
+        break;
+      }
+      default:
+        return isolate->factory()->object_to_string();
+    }
   }
 
   IncrementalStringBuilder builder(isolate);
@@ -15321,7 +15759,6 @@
   return builder.Finish();
 }
 
-
 const char* Symbol::PrivateSymbolToName() const {
   Heap* heap = GetIsolate()->heap();
 #define SYMBOL_CHECK_AND_PRINT(name) \
@@ -15333,12 +15770,12 @@
 
 
 void Symbol::SymbolShortPrint(std::ostream& os) {
-  os << "<Symbol: " << Hash();
-  if (!name()->IsUndefined()) {
+  os << "<Symbol:";
+  if (!name()->IsUndefined(GetIsolate())) {
     os << " ";
     HeapStringAllocator allocator;
     StringStream accumulator(&allocator);
-    String::cast(name())->StringShortPrint(&accumulator);
+    String::cast(name())->StringShortPrint(&accumulator, false);
     os << accumulator.ToCString().get();
   } else {
     os << " (" << PrivateSymbolToName() << ")";
@@ -15456,7 +15893,6 @@
         flag = JSRegExp::kMultiline;
         break;
       case 'u':
-        if (!FLAG_harmony_unicode_regexps) return JSRegExp::Flags(0);
         flag = JSRegExp::kUnicode;
         break;
       case 'y':
@@ -15771,21 +16207,12 @@
   uint32_t capacity = this->Capacity();
   uint32_t entry = Derived::FirstProbe(key->Hash(), capacity);
   uint32_t count = 1;
-
+  Isolate* isolate = this->GetIsolate();
   while (true) {
-    int index = Derived::EntryToIndex(entry);
-    Object* element = this->get(index);
-    if (element->IsUndefined()) break;  // Empty entry.
+    Object* element = this->KeyAt(entry);
+    if (element->IsUndefined(isolate)) break;  // Empty entry.
     if (*key == element) return entry;
-    if (!element->IsUniqueName() &&
-        !element->IsTheHole() &&
-        Name::cast(element)->Equals(*key)) {
-      // Replace a key that is a non-internalized string by the equivalent
-      // internalized string for faster further lookups.
-      this->set(index, *key);
-      return entry;
-    }
-    DCHECK(element->IsTheHole() || !Name::cast(element)->Equals(*key));
+    DCHECK(element->IsTheHole(isolate) || element->IsUniqueName());
     entry = Derived::NextProbe(entry, count++, capacity);
   }
   return Derived::kNotFound;
@@ -15870,6 +16297,7 @@
 void HashTable<Derived, Shape, Key>::Rehash(Key key) {
   DisallowHeapAllocation no_gc;
   WriteBarrierMode mode = GetWriteBarrierMode(no_gc);
+  Isolate* isolate = GetIsolate();
   uint32_t capacity = Capacity();
   bool done = false;
   for (int probe = 1; !done; probe++) {
@@ -15877,11 +16305,11 @@
     // are placed correctly. Other elements might need to be moved.
     done = true;
     for (uint32_t current = 0; current < capacity; current++) {
-      Object* current_key = get(EntryToIndex(current));
-      if (IsKey(current_key)) {
+      Object* current_key = KeyAt(current);
+      if (IsKey(isolate, current_key)) {
         uint32_t target = EntryForProbe(key, current_key, probe, current);
         if (current == target) continue;
-        Object* target_key = get(EntryToIndex(target));
+        Object* target_key = KeyAt(target);
         if (!IsKey(target_key) ||
             EntryForProbe(key, target_key, probe, target) != target) {
           // Put the current element into the correct position.
@@ -15897,12 +16325,11 @@
     }
   }
   // Wipe deleted entries.
-  Heap* heap = GetHeap();
-  Object* the_hole = heap->the_hole_value();
-  Object* undefined = heap->undefined_value();
+  Object* the_hole = isolate->heap()->the_hole_value();
+  Object* undefined = isolate->heap()->undefined_value();
   for (uint32_t current = 0; current < capacity; current++) {
-    if (get(EntryToIndex(current)) == the_hole) {
-      set(EntryToIndex(current), undefined);
+    if (KeyAt(current) == the_hole) {
+      set(EntryToIndex(current) + Derived::kEntryKeyIndex, undefined);
     }
   }
   SetNumberOfDeletedElements(0);
@@ -15919,7 +16346,7 @@
   int capacity = table->Capacity();
   int nof = table->NumberOfElements() + n;
 
-  if (table->HasSufficientCapacity(n)) return table;
+  if (table->HasSufficientCapacityToAdd(n)) return table;
 
   const int kMinCapacityForPretenure = 256;
   bool should_pretenure = pretenure == TENURED ||
@@ -15935,16 +16362,16 @@
   return new_table;
 }
 
-
 template <typename Derived, typename Shape, typename Key>
-bool HashTable<Derived, Shape, Key>::HasSufficientCapacity(int n) {
+bool HashTable<Derived, Shape, Key>::HasSufficientCapacityToAdd(
+    int number_of_additional_elements) {
   int capacity = Capacity();
-  int nof = NumberOfElements() + n;
+  int nof = NumberOfElements() + number_of_additional_elements;
   int nod = NumberOfDeletedElements();
   // Return true if:
-  //   50% is still free after adding n elements and
+  //   50% is still free after adding number_of_additional_elements elements and
   //   at most 50% of the free elements are deleted elements.
-  if (nod <= (capacity - nof) >> 1) {
+  if ((nof < capacity) && ((nod <= (capacity - nof) >> 1))) {
     int needed_free = nof >> 1;
     if (nof + needed_free <= capacity) return true;
   }
@@ -15990,12 +16417,10 @@
   uint32_t entry = FirstProbe(hash, capacity);
   uint32_t count = 1;
   // EnsureCapacity will guarantee the hash table is never full.
-  Heap* heap = GetHeap();
-  Object* the_hole = heap->the_hole_value();
-  Object* undefined = heap->undefined_value();
+  Isolate* isolate = GetIsolate();
   while (true) {
     Object* element = KeyAt(entry);
-    if (element == the_hole || element == undefined) break;
+    if (!IsKey(isolate, element)) break;
     entry = NextProbe(entry, count++, capacity);
   }
   return entry;
@@ -16187,7 +16612,7 @@
   DisallowHeapAllocation no_gc;
   for (int i = 0; i < capacity; i++) {
     Object* k = dict->KeyAt(i);
-    if (!dict->IsKey(k)) continue;
+    if (!dict->IsKey(isolate, k)) continue;
 
     DCHECK(k->IsNumber());
     DCHECK(!k->IsSmi() || Smi::cast(k)->value() >= 0);
@@ -16205,7 +16630,7 @@
 
     uint32_t key = NumberToUint32(k);
     if (key < limit) {
-      if (value->IsUndefined()) {
+      if (value->IsUndefined(isolate)) {
         undefs++;
       } else if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
         // Adding an entry with the key beyond smi-range requires
@@ -16355,10 +16780,10 @@
     // number of stores of non-undefined, non-the-hole values.
     for (unsigned int i = 0; i < undefs; i++) {
       Object* current = elements->get(i);
-      if (current->IsTheHole()) {
+      if (current->IsTheHole(isolate)) {
         holes--;
         undefs--;
-      } else if (current->IsUndefined()) {
+      } else if (current->IsUndefined(isolate)) {
         undefs--;
       } else {
         continue;
@@ -16366,10 +16791,10 @@
       // Position i needs to be filled.
       while (undefs > i) {
         current = elements->get(undefs);
-        if (current->IsTheHole()) {
+        if (current->IsTheHole(isolate)) {
           holes--;
           undefs--;
-        } else if (current->IsUndefined()) {
+        } else if (current->IsUndefined(isolate)) {
           undefs--;
         } else {
           elements->set(i, current, write_barrier);
@@ -16437,8 +16862,9 @@
 // TODO(ishell): rename to EnsureEmptyPropertyCell or something.
 Handle<PropertyCell> JSGlobalObject::EnsurePropertyCell(
     Handle<JSGlobalObject> global, Handle<Name> name) {
+  Isolate* isolate = global->GetIsolate();
   DCHECK(!global->HasFastProperties());
-  auto dictionary = handle(global->global_dictionary());
+  auto dictionary = handle(global->global_dictionary(), isolate);
   int entry = dictionary->FindEntry(name);
   Handle<PropertyCell> cell;
   if (entry != GlobalDictionary::kNotFound) {
@@ -16449,10 +16875,9 @@
                PropertyCellType::kUninitialized ||
            cell->property_details().cell_type() ==
                PropertyCellType::kInvalidated);
-    DCHECK(cell->value()->IsTheHole());
+    DCHECK(cell->value()->IsTheHole(isolate));
     return cell;
   }
-  Isolate* isolate = global->GetIsolate();
   cell = isolate->factory()->NewPropertyCell();
   PropertyDetails details(NONE, DATA, 0, PropertyCellType::kUninitialized);
   dictionary = GlobalDictionary::Add(dictionary, name, cell, details);
@@ -16764,7 +17189,8 @@
 void CompilationCacheTable::Age() {
   DisallowHeapAllocation no_allocation;
   Object* the_hole_value = GetHeap()->the_hole_value();
-  for (int entry = 0, size = Capacity(); entry < size; entry++) {
+  uint32_t capacity = Capacity();
+  for (int entry = 0, size = capacity; entry < size; entry++) {
     int entry_index = EntryToIndex(entry);
     int value_index = entry_index + 1;
 
@@ -16787,6 +17213,16 @@
       }
     }
   }
+  // Wipe deleted entries.
+  Heap* heap = GetHeap();
+  Object* the_hole = heap->the_hole_value();
+  Object* undefined = heap->undefined_value();
+  for (uint32_t current = 0; current < capacity; current++) {
+    if (get(EntryToIndex(current)) == the_hole) {
+      set(EntryToIndex(current), undefined);
+    }
+  }
+  SetNumberOfDeletedElements(0);
 }
 
 
@@ -16826,7 +17262,8 @@
 template <typename Derived, typename Shape, typename Key>
 Handle<FixedArray> Dictionary<Derived, Shape, Key>::BuildIterationIndicesArray(
     Handle<Derived> dictionary) {
-  Factory* factory = dictionary->GetIsolate()->factory();
+  Isolate* isolate = dictionary->GetIsolate();
+  Factory* factory = isolate->factory();
   int length = dictionary->NumberOfElements();
 
   Handle<FixedArray> iteration_order = factory->NewFixedArray(length);
@@ -16837,7 +17274,7 @@
   int capacity = dictionary->Capacity();
   int pos = 0;
   for (int i = 0; i < capacity; i++) {
-    if (dictionary->IsKey(dictionary->KeyAt(i))) {
+    if (dictionary->IsKey(isolate, dictionary->KeyAt(i))) {
       int index = dictionary->DetailsAt(i).dictionary_index();
       iteration_order->set(pos, Smi::FromInt(i));
       enumeration_order->set(pos, Smi::FromInt(index));
@@ -16886,7 +17323,7 @@
   DCHECK_EQ(0, DerivedHashTable::NumberOfDeletedElements());
   // Make sure that HashTable::EnsureCapacity will create a copy.
   DerivedHashTable::SetNumberOfDeletedElements(DerivedHashTable::Capacity());
-  DCHECK(!DerivedHashTable::HasSufficientCapacity(1));
+  DCHECK(!DerivedHashTable::HasSufficientCapacityToAdd(1));
 }
 
 
@@ -16984,16 +17421,16 @@
 
 bool SeededNumberDictionary::HasComplexElements() {
   if (!requires_slow_elements()) return false;
+  Isolate* isolate = this->GetIsolate();
   int capacity = this->Capacity();
   for (int i = 0; i < capacity; i++) {
     Object* k = this->KeyAt(i);
-    if (this->IsKey(k)) {
-      DCHECK(!IsDeleted(i));
-      PropertyDetails details = this->DetailsAt(i);
-      if (details.type() == ACCESSOR_CONSTANT) return true;
-      PropertyAttributes attr = details.attributes();
-      if (attr & ALL_ATTRIBUTES_MASK) return true;
-    }
+    if (!this->IsKey(isolate, k)) continue;
+    DCHECK(!IsDeleted(i));
+    PropertyDetails details = this->DetailsAt(i);
+    if (details.type() == ACCESSOR_CONSTANT) return true;
+    PropertyAttributes attr = details.attributes();
+    if (attr & ALL_ATTRIBUTES_MASK) return true;
   }
   return false;
 }
@@ -17089,11 +17526,12 @@
 template <typename Derived, typename Shape, typename Key>
 int Dictionary<Derived, Shape, Key>::NumberOfElementsFilterAttributes(
     PropertyFilter filter) {
+  Isolate* isolate = this->GetIsolate();
   int capacity = this->Capacity();
   int result = 0;
   for (int i = 0; i < capacity; i++) {
     Object* k = this->KeyAt(i);
-    if (this->IsKey(k) && !k->FilterKey(filter)) {
+    if (this->IsKey(isolate, k) && !k->FilterKey(filter)) {
       if (this->IsDeleted(i)) continue;
       PropertyDetails details = this->DetailsAt(i);
       PropertyAttributes attr = details.attributes();
@@ -17118,12 +17556,13 @@
 
 template <typename Derived, typename Shape, typename Key>
 void Dictionary<Derived, Shape, Key>::CopyEnumKeysTo(FixedArray* storage) {
+  Isolate* isolate = this->GetIsolate();
   int length = storage->length();
   int capacity = this->Capacity();
   int properties = 0;
   for (int i = 0; i < capacity; i++) {
     Object* k = this->KeyAt(i);
-    if (this->IsKey(k) && !k->IsSymbol()) {
+    if (this->IsKey(isolate, k) && !k->IsSymbol()) {
       PropertyDetails details = this->DetailsAt(i);
       if (details.IsDontEnum() || this->IsDeleted(i)) continue;
       storage->set(properties, Smi::FromInt(i));
@@ -17145,9 +17584,10 @@
 void Dictionary<Derived, Shape, Key>::CollectKeysTo(
     Handle<Dictionary<Derived, Shape, Key> > dictionary, KeyAccumulator* keys,
     PropertyFilter filter) {
+  Isolate* isolate = keys->isolate();
   int capacity = dictionary->Capacity();
   Handle<FixedArray> array =
-      keys->isolate()->factory()->NewFixedArray(dictionary->NumberOfElements());
+      isolate->factory()->NewFixedArray(dictionary->NumberOfElements());
   int array_size = 0;
 
   {
@@ -17155,7 +17595,7 @@
     Dictionary<Derived, Shape, Key>* raw_dict = *dictionary;
     for (int i = 0; i < capacity; i++) {
       Object* k = raw_dict->KeyAt(i);
-      if (!raw_dict->IsKey(k) || k->FilterKey(filter)) continue;
+      if (!raw_dict->IsKey(isolate, k) || k->FilterKey(filter)) continue;
       if (raw_dict->IsDeleted(i)) continue;
       PropertyDetails details = raw_dict->DetailsAt(i);
       if ((details.attributes() & filter) != 0) continue;
@@ -17176,9 +17616,23 @@
     std::sort(start, start + array_size, cmp);
   }
 
+  bool has_seen_symbol = false;
   for (int i = 0; i < array_size; i++) {
     int index = Smi::cast(array->get(i))->value();
-    keys->AddKey(dictionary->KeyAt(index), DO_NOT_CONVERT);
+    Object* key = dictionary->KeyAt(index);
+    if (key->IsSymbol()) {
+      has_seen_symbol = true;
+      continue;
+    }
+    keys->AddKey(key, DO_NOT_CONVERT);
+  }
+  if (has_seen_symbol) {
+    for (int i = 0; i < array_size; i++) {
+      int index = Smi::cast(array->get(i))->value();
+      Object* key = dictionary->KeyAt(index);
+      if (!key->IsSymbol()) continue;
+      keys->AddKey(key, DO_NOT_CONVERT);
+    }
   }
 }
 
@@ -17186,27 +17640,26 @@
 // Backwards lookup (slow).
 template<typename Derived, typename Shape, typename Key>
 Object* Dictionary<Derived, Shape, Key>::SlowReverseLookup(Object* value) {
+  Isolate* isolate = this->GetIsolate();
   int capacity = this->Capacity();
   for (int i = 0; i < capacity; i++) {
     Object* k = this->KeyAt(i);
-    if (this->IsKey(k)) {
-      Object* e = this->ValueAt(i);
-      // TODO(dcarney): this should be templatized.
-      if (e->IsPropertyCell()) {
-        e = PropertyCell::cast(e)->value();
-      }
-      if (e == value) return k;
+    if (!this->IsKey(isolate, k)) continue;
+    Object* e = this->ValueAt(i);
+    // TODO(dcarney): this should be templatized.
+    if (e->IsPropertyCell()) {
+      e = PropertyCell::cast(e)->value();
     }
+    if (e == value) return k;
   }
-  Heap* heap = Dictionary::GetHeap();
-  return heap->undefined_value();
+  return isolate->heap()->undefined_value();
 }
 
 
 Object* ObjectHashTable::Lookup(Isolate* isolate, Handle<Object> key,
                                 int32_t hash) {
   DisallowHeapAllocation no_gc;
-  DCHECK(IsKey(*key));
+  DCHECK(IsKey(isolate, *key));
 
   int entry = FindEntry(isolate, key, hash);
   if (entry == kNotFound) return isolate->heap()->the_hole_value();
@@ -17216,13 +17669,13 @@
 
 Object* ObjectHashTable::Lookup(Handle<Object> key) {
   DisallowHeapAllocation no_gc;
-  DCHECK(IsKey(*key));
 
   Isolate* isolate = GetIsolate();
+  DCHECK(IsKey(isolate, *key));
 
   // If the object does not have an identity hash, it was never used as a key.
   Object* hash = key->GetHash();
-  if (hash->IsUndefined()) {
+  if (hash->IsUndefined(isolate)) {
     return isolate->heap()->the_hole_value();
   }
   return Lookup(isolate, key, Smi::cast(hash)->value());
@@ -17237,10 +17690,10 @@
 Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table,
                                              Handle<Object> key,
                                              Handle<Object> value) {
-  DCHECK(table->IsKey(*key));
-  DCHECK(!value->IsTheHole());
-
   Isolate* isolate = table->GetIsolate();
+  DCHECK(table->IsKey(isolate, *key));
+  DCHECK(!value->IsTheHole(isolate));
+
   // Make sure the key object has an identity hash code.
   int32_t hash = Object::GetOrCreateHash(isolate, key)->value();
 
@@ -17252,10 +17705,9 @@
                                              Handle<Object> key,
                                              Handle<Object> value,
                                              int32_t hash) {
-  DCHECK(table->IsKey(*key));
-  DCHECK(!value->IsTheHole());
-
   Isolate* isolate = table->GetIsolate();
+  DCHECK(table->IsKey(isolate, *key));
+  DCHECK(!value->IsTheHole(isolate));
 
   int entry = table->FindEntry(isolate, key, hash);
 
@@ -17265,11 +17717,24 @@
     return table;
   }
 
-  // Rehash if more than 33% of the entries are deleted entries.
+  // Rehash if more than 25% of the entries are deleted entries.
   // TODO(jochen): Consider to shrink the fixed array in place.
   if ((table->NumberOfDeletedElements() << 1) > table->NumberOfElements()) {
     table->Rehash(isolate->factory()->undefined_value());
   }
+  // If we're out of luck, we didn't get a GC recently, and so rehashing
+  // isn't enough to avoid a crash.
+  if (!table->HasSufficientCapacityToAdd(1)) {
+    int nof = table->NumberOfElements() + 1;
+    int capacity = ObjectHashTable::ComputeCapacity(nof * 2);
+    if (capacity > ObjectHashTable::kMaxCapacity) {
+      for (size_t i = 0; i < 2; ++i) {
+        isolate->heap()->CollectAllGarbage(
+            Heap::kFinalizeIncrementalMarkingMask, "full object hash table");
+      }
+      table->Rehash(isolate->factory()->undefined_value());
+    }
+  }
 
   // Check whether the hash table should be extended.
   table = EnsureCapacity(table, 1, key);
@@ -17281,10 +17746,10 @@
 Handle<ObjectHashTable> ObjectHashTable::Remove(Handle<ObjectHashTable> table,
                                                 Handle<Object> key,
                                                 bool* was_present) {
-  DCHECK(table->IsKey(*key));
+  DCHECK(table->IsKey(table->GetIsolate(), *key));
 
   Object* hash = key->GetHash();
-  if (hash->IsUndefined()) {
+  if (hash->IsUndefined(table->GetIsolate())) {
     *was_present = false;
     return table;
   }
@@ -17297,9 +17762,10 @@
                                                 Handle<Object> key,
                                                 bool* was_present,
                                                 int32_t hash) {
-  DCHECK(table->IsKey(*key));
+  Isolate* isolate = table->GetIsolate();
+  DCHECK(table->IsKey(isolate, *key));
 
-  int entry = table->FindEntry(table->GetIsolate(), key, hash);
+  int entry = table->FindEntry(isolate, key, hash);
   if (entry == kNotFound) {
     *was_present = false;
     return table;
@@ -17327,9 +17793,10 @@
 
 Object* WeakHashTable::Lookup(Handle<HeapObject> key) {
   DisallowHeapAllocation no_gc;
-  DCHECK(IsKey(*key));
+  Isolate* isolate = GetIsolate();
+  DCHECK(IsKey(isolate, *key));
   int entry = FindEntry(key);
-  if (entry == kNotFound) return GetHeap()->the_hole_value();
+  if (entry == kNotFound) return isolate->heap()->the_hole_value();
   return get(EntryToValueIndex(entry));
 }
 
@@ -17337,7 +17804,8 @@
 Handle<WeakHashTable> WeakHashTable::Put(Handle<WeakHashTable> table,
                                          Handle<HeapObject> key,
                                          Handle<HeapObject> value) {
-  DCHECK(table->IsKey(*key));
+  Isolate* isolate = key->GetIsolate();
+  DCHECK(table->IsKey(isolate, *key));
   int entry = table->FindEntry(key);
   // Key is already in table, just overwrite value.
   if (entry != kNotFound) {
@@ -17345,7 +17813,7 @@
     return table;
   }
 
-  Handle<WeakCell> key_cell = key->GetIsolate()->factory()->NewWeakCell(key);
+  Handle<WeakCell> key_cell = isolate->factory()->NewWeakCell(key);
 
   // Check whether the hash table should be extended.
   table = EnsureCapacity(table, 1, key, TENURED);
@@ -17439,11 +17907,14 @@
 template <class Derived, class Iterator, int entrysize>
 bool OrderedHashTable<Derived, Iterator, entrysize>::HasKey(
     Handle<Derived> table, Handle<Object> key) {
-  int entry = table->KeyToFirstEntry(*key);
+  DisallowHeapAllocation no_gc;
+  Isolate* isolate = table->GetIsolate();
+  Object* raw_key = *key;
+  int entry = table->KeyToFirstEntry(isolate, raw_key);
   // Walk the chain in the bucket to find the key.
   while (entry != kNotFound) {
     Object* candidate_key = table->KeyAt(entry);
-    if (candidate_key->SameValueZero(*key)) return true;
+    if (candidate_key->SameValueZero(raw_key)) return true;
     entry = table->NextChainEntry(entry);
   }
   return false;
@@ -17478,16 +17949,36 @@
   return table;
 }
 
+Handle<FixedArray> OrderedHashSet::ConvertToKeysArray(
+    Handle<OrderedHashSet> table, GetKeysConversion convert) {
+  Isolate* isolate = table->GetIsolate();
+  int length = table->NumberOfElements();
+  int nof_buckets = table->NumberOfBuckets();
+  // Convert the dictionary to a linear list.
+  Handle<FixedArray> result = Handle<FixedArray>::cast(table);
+  // From this point on table is no longer a valid OrderedHashSet.
+  result->set_map(isolate->heap()->fixed_array_map());
+  for (int i = 0; i < length; i++) {
+    int index = kHashTableStartIndex + nof_buckets + (i * kEntrySize);
+    Object* key = table->get(index);
+    if (convert == GetKeysConversion::kConvertToString && key->IsNumber()) {
+      key = *isolate->factory()->NumberToString(handle(key, isolate));
+    }
+    result->set(i, key);
+  }
+  result->Shrink(length);
+  return result;
+}
 
 template<class Derived, class Iterator, int entrysize>
 Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Rehash(
     Handle<Derived> table, int new_capacity) {
   Isolate* isolate = table->GetIsolate();
-  Heap* heap = isolate->heap();
   DCHECK(!table->IsObsolete());
 
-  Handle<Derived> new_table = Allocate(
-      isolate, new_capacity, heap->InNewSpace(*table) ? NOT_TENURED : TENURED);
+  Handle<Derived> new_table =
+      Allocate(isolate, new_capacity,
+               isolate->heap()->InNewSpace(*table) ? NOT_TENURED : TENURED);
   int nof = table->NumberOfElements();
   int nod = table->NumberOfDeletedElements();
   int new_buckets = new_table->NumberOfBuckets();
@@ -17495,10 +17986,9 @@
   int removed_holes_index = 0;
 
   DisallowHeapAllocation no_gc;
-  Object* the_hole = heap->the_hole_value();
   for (int old_entry = 0; old_entry < (nof + nod); ++old_entry) {
     Object* key = table->KeyAt(old_entry);
-    if (key == the_hole) {
+    if (key->IsTheHole(isolate)) {
       table->SetRemovedIndexAt(removed_holes_index++, old_entry);
       continue;
     }
@@ -17602,7 +18092,8 @@
 template<class Derived, class TableType>
 bool OrderedHashTableIterator<Derived, TableType>::HasMore() {
   DisallowHeapAllocation no_allocation;
-  if (this->table()->IsUndefined()) return false;
+  Isolate* isolate = this->GetIsolate();
+  if (this->table()->IsUndefined(isolate)) return false;
 
   Transition();
 
@@ -17610,7 +18101,7 @@
   int index = Smi::cast(this->index())->value();
   int used_capacity = table->UsedCapacity();
 
-  while (index < used_capacity && table->KeyAt(index)->IsTheHole()) {
+  while (index < used_capacity && table->KeyAt(index)->IsTheHole(isolate)) {
     index++;
   }
 
@@ -17618,7 +18109,7 @@
 
   if (index < used_capacity) return true;
 
-  set_table(GetHeap()->undefined_value());
+  set_table(isolate->heap()->undefined_value());
   return false;
 }
 
@@ -17744,7 +18235,7 @@
 
   // If there is no break point info object or no break points in the break
   // point info object there is no break point at this code offset.
-  if (break_point_info->IsUndefined()) return false;
+  if (break_point_info->IsUndefined(GetIsolate())) return false;
   return BreakPointInfo::cast(break_point_info)->GetBreakPointCount() > 0;
 }
 
@@ -17761,9 +18252,10 @@
 // Clear a break point at the specified code offset.
 void DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info, int code_offset,
                                 Handle<Object> break_point_object) {
+  Isolate* isolate = debug_info->GetIsolate();
   Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_offset),
-                                  debug_info->GetIsolate());
-  if (break_point_info->IsUndefined()) return;
+                                  isolate);
+  if (break_point_info->IsUndefined(isolate)) return;
   BreakPointInfo::ClearBreakPoint(
       Handle<BreakPointInfo>::cast(break_point_info),
       break_point_object);
@@ -17775,7 +18267,7 @@
   Isolate* isolate = debug_info->GetIsolate();
   Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_offset),
                                   isolate);
-  if (!break_point_info->IsUndefined()) {
+  if (!break_point_info->IsUndefined(isolate)) {
     BreakPointInfo::SetBreakPoint(
         Handle<BreakPointInfo>::cast(break_point_info),
         break_point_object);
@@ -17786,15 +18278,15 @@
   // break points before. Try to find a free slot.
   int index = kNoBreakPointInfo;
   for (int i = 0; i < debug_info->break_points()->length(); i++) {
-    if (debug_info->break_points()->get(i)->IsUndefined()) {
+    if (debug_info->break_points()->get(i)->IsUndefined(isolate)) {
       index = i;
       break;
     }
   }
   if (index == kNoBreakPointInfo) {
     // No free slot - extend break point info array.
-    Handle<FixedArray> old_break_points =
-        Handle<FixedArray>(FixedArray::cast(debug_info->break_points()));
+    Handle<FixedArray> old_break_points = Handle<FixedArray>(
+        FixedArray::cast(debug_info->break_points()), isolate);
     Handle<FixedArray> new_break_points =
         isolate->factory()->NewFixedArray(
             old_break_points->length() +
@@ -17823,21 +18315,22 @@
 // Get the break point objects for a code offset.
 Handle<Object> DebugInfo::GetBreakPointObjects(int code_offset) {
   Object* break_point_info = GetBreakPointInfo(code_offset);
-  if (break_point_info->IsUndefined()) {
-    return GetIsolate()->factory()->undefined_value();
+  Isolate* isolate = GetIsolate();
+  if (break_point_info->IsUndefined(isolate)) {
+    return isolate->factory()->undefined_value();
   }
   return Handle<Object>(
-      BreakPointInfo::cast(break_point_info)->break_point_objects(),
-      GetIsolate());
+      BreakPointInfo::cast(break_point_info)->break_point_objects(), isolate);
 }
 
 
 // Get the total number of break points.
 int DebugInfo::GetBreakPointCount() {
-  if (break_points()->IsUndefined()) return 0;
+  Isolate* isolate = GetIsolate();
+  if (break_points()->IsUndefined(isolate)) return 0;
   int count = 0;
   for (int i = 0; i < break_points()->length(); i++) {
-    if (!break_points()->get(i)->IsUndefined()) {
+    if (!break_points()->get(i)->IsUndefined(isolate)) {
       BreakPointInfo* break_point_info =
           BreakPointInfo::cast(break_points()->get(i));
       count += break_point_info->GetBreakPointCount();
@@ -17850,9 +18343,9 @@
 Handle<Object> DebugInfo::FindBreakPointInfo(
     Handle<DebugInfo> debug_info, Handle<Object> break_point_object) {
   Isolate* isolate = debug_info->GetIsolate();
-  if (!debug_info->break_points()->IsUndefined()) {
+  if (!debug_info->break_points()->IsUndefined(isolate)) {
     for (int i = 0; i < debug_info->break_points()->length(); i++) {
-      if (!debug_info->break_points()->get(i)->IsUndefined()) {
+      if (!debug_info->break_points()->get(i)->IsUndefined(isolate)) {
         Handle<BreakPointInfo> break_point_info = Handle<BreakPointInfo>(
             BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate);
         if (BreakPointInfo::HasBreakPointObject(break_point_info,
@@ -17869,9 +18362,10 @@
 // Find the index of the break point info object for the specified code
 // position.
 int DebugInfo::GetBreakPointInfoIndex(int code_offset) {
-  if (break_points()->IsUndefined()) return kNoBreakPointInfo;
+  Isolate* isolate = GetIsolate();
+  if (break_points()->IsUndefined(isolate)) return kNoBreakPointInfo;
   for (int i = 0; i < break_points()->length(); i++) {
-    if (!break_points()->get(i)->IsUndefined()) {
+    if (!break_points()->get(i)->IsUndefined(isolate)) {
       BreakPointInfo* break_point_info =
           BreakPointInfo::cast(break_points()->get(i));
       if (break_point_info->code_offset() == code_offset) {
@@ -17888,7 +18382,7 @@
                                      Handle<Object> break_point_object) {
   Isolate* isolate = break_point_info->GetIsolate();
   // If there are no break points just ignore.
-  if (break_point_info->break_point_objects()->IsUndefined()) return;
+  if (break_point_info->break_point_objects()->IsUndefined(isolate)) return;
   // If there is a single break point clear it if it is the same.
   if (!break_point_info->break_point_objects()->IsFixedArray()) {
     if (break_point_info->break_point_objects() == *break_point_object) {
@@ -17924,7 +18418,7 @@
   Isolate* isolate = break_point_info->GetIsolate();
 
   // If there was no break point objects before just set it.
-  if (break_point_info->break_point_objects()->IsUndefined()) {
+  if (break_point_info->break_point_objects()->IsUndefined(isolate)) {
     break_point_info->set_break_point_objects(*break_point_object);
     return;
   }
@@ -17959,7 +18453,10 @@
     Handle<BreakPointInfo> break_point_info,
     Handle<Object> break_point_object) {
   // No break point.
-  if (break_point_info->break_point_objects()->IsUndefined()) return false;
+  Isolate* isolate = break_point_info->GetIsolate();
+  if (break_point_info->break_point_objects()->IsUndefined(isolate)) {
+    return false;
+  }
   // Single break point.
   if (!break_point_info->break_point_objects()->IsFixedArray()) {
     return break_point_info->break_point_objects() == *break_point_object;
@@ -17978,7 +18475,7 @@
 // Get the number of break points.
 int BreakPointInfo::GetBreakPointCount() {
   // No break point.
-  if (break_point_objects()->IsUndefined()) return 0;
+  if (break_point_objects()->IsUndefined(GetIsolate())) return 0;
   // Single break point.
   if (!break_point_objects()->IsFixedArray()) return 1;
   // Multiple break points.
@@ -18305,7 +18802,7 @@
   auto new_cell = isolate->factory()->NewPropertyCell();
   new_cell->set_value(cell->value());
   dictionary->ValueAtPut(entry, *new_cell);
-  bool is_the_hole = cell->value()->IsTheHole();
+  bool is_the_hole = cell->value()->IsTheHole(isolate);
   // Cell is officially mutable henceforth.
   PropertyDetails details = cell->property_details();
   details = details.set_cell_type(is_the_hole ? PropertyCellType::kInvalidated
@@ -18349,12 +18846,13 @@
                                            Handle<Object> value,
                                            PropertyDetails details) {
   PropertyCellType type = details.cell_type();
-  DCHECK(!value->IsTheHole());
-  if (cell->value()->IsTheHole()) {
+  Isolate* isolate = cell->GetIsolate();
+  DCHECK(!value->IsTheHole(isolate));
+  if (cell->value()->IsTheHole(isolate)) {
     switch (type) {
       // Only allow a cell to transition once into constant state.
       case PropertyCellType::kUninitialized:
-        if (value->IsUndefined()) return PropertyCellType::kUndefined;
+        if (value->IsUndefined(isolate)) return PropertyCellType::kUndefined;
         return PropertyCellType::kConstant;
       case PropertyCellType::kInvalidated:
         return PropertyCellType::kMutable;
@@ -18384,7 +18882,8 @@
 
 void PropertyCell::UpdateCell(Handle<GlobalDictionary> dictionary, int entry,
                               Handle<Object> value, PropertyDetails details) {
-  DCHECK(!value->IsTheHole());
+  Isolate* isolate = dictionary->GetIsolate();
+  DCHECK(!value->IsTheHole(isolate));
   DCHECK(dictionary->ValueAt(entry)->IsPropertyCell());
   Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry)));
   const PropertyDetails original_details = cell->property_details();
@@ -18395,7 +18894,7 @@
   PropertyCellType old_type = original_details.cell_type();
   // Preserve the enumeration index unless the property was deleted or never
   // initialized.
-  if (cell->value()->IsTheHole()) {
+  if (cell->value()->IsTheHole(isolate)) {
     index = dictionary->NextEnumerationIndex();
     dictionary->SetNextEnumerationIndex(index + 1);
     // Negative lookup cells must be invalidated.
@@ -18415,7 +18914,6 @@
   // Deopt when transitioning from a constant type.
   if (!invalidate && (old_type != new_type ||
                       original_details.IsReadOnly() != details.IsReadOnly())) {
-    Isolate* isolate = dictionary->GetIsolate();
     cell->dependent_code()->DeoptimizeDependentCodeGroup(
         isolate, DependentCode::kPropertyCellChangedGroup);
   }
@@ -18433,5 +18931,41 @@
   }
 }
 
+int JSGeneratorObject::source_position() const {
+  CHECK(is_suspended());
+  if (function()->shared()->HasBytecodeArray()) {
+    // New-style generators.
+    int offset = Smi::cast(input_or_debug_pos())->value();
+    // The stored bytecode offset is relative to a different base than what
+    // is used in the source position table, hence the subtraction.
+    offset -= BytecodeArray::kHeaderSize - kHeapObjectTag;
+    return function()->shared()->bytecode_array()->SourcePosition(offset);
+  } else {
+    // Old-style generators.
+    int offset = continuation();
+    CHECK(0 <= offset && offset < function()->code()->instruction_size());
+    return function()->code()->SourcePosition(offset);
+  }
+}
+
+// static
+AccessCheckInfo* AccessCheckInfo::Get(Isolate* isolate,
+                                      Handle<JSObject> receiver) {
+  DisallowHeapAllocation no_gc;
+  DCHECK(receiver->map()->is_access_check_needed());
+  Object* maybe_constructor = receiver->map()->GetConstructor();
+  // Might happen for a detached context.
+  if (!maybe_constructor->IsJSFunction()) return nullptr;
+  JSFunction* constructor = JSFunction::cast(maybe_constructor);
+  // Might happen for the debug context.
+  if (!constructor->shared()->IsApiFunction()) return nullptr;
+
+  Object* data_obj =
+      constructor->shared()->get_api_func_data()->access_check_info();
+  if (data_obj->IsUndefined(isolate)) return nullptr;
+
+  return AccessCheckInfo::cast(data_obj);
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/objects.h b/src/objects.h
index 15d2d72..e37b9bd 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -415,6 +415,7 @@
   V(JS_VALUE_TYPE)                                              \
   V(JS_DATE_TYPE)                                               \
   V(JS_OBJECT_TYPE)                                             \
+  V(JS_ARGUMENTS_TYPE)                                          \
   V(JS_CONTEXT_EXTENSION_OBJECT_TYPE)                           \
   V(JS_GENERATOR_OBJECT_TYPE)                                   \
   V(JS_MODULE_TYPE)                                             \
@@ -435,6 +436,7 @@
   V(JS_WEAK_SET_TYPE)                                           \
   V(JS_PROMISE_TYPE)                                            \
   V(JS_REGEXP_TYPE)                                             \
+  V(JS_ERROR_TYPE)                                              \
                                                                 \
   V(JS_BOUND_FUNCTION_TYPE)                                     \
   V(JS_FUNCTION_TYPE)                                           \
@@ -712,6 +714,7 @@
   // Like JS_OBJECT_TYPE, but created from API function.
   JS_API_OBJECT_TYPE,
   JS_OBJECT_TYPE,
+  JS_ARGUMENTS_TYPE,
   JS_CONTEXT_EXTENSION_OBJECT_TYPE,
   JS_GENERATOR_OBJECT_TYPE,
   JS_MODULE_TYPE,
@@ -727,6 +730,7 @@
   JS_WEAK_SET_TYPE,
   JS_PROMISE_TYPE,
   JS_REGEXP_TYPE,
+  JS_ERROR_TYPE,
   JS_BOUND_FUNCTION_TYPE,
   JS_FUNCTION_TYPE,  // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
 
@@ -856,6 +860,7 @@
 class SharedFunctionInfo;
 class StringStream;
 class TypeFeedbackInfo;
+class TypeFeedbackMetadata;
 class TypeFeedbackVector;
 class WeakCell;
 class TransitionArray;
@@ -883,134 +888,136 @@
   V(Primitive)              \
   V(Number)
 
-#define HEAP_OBJECT_TYPE_LIST(V)   \
-  V(HeapNumber)                    \
-  V(MutableHeapNumber)             \
-  V(Simd128Value)                  \
-  V(Float32x4)                     \
-  V(Int32x4)                       \
-  V(Uint32x4)                      \
-  V(Bool32x4)                      \
-  V(Int16x8)                       \
-  V(Uint16x8)                      \
-  V(Bool16x8)                      \
-  V(Int8x16)                       \
-  V(Uint8x16)                      \
-  V(Bool8x16)                      \
-  V(Name)                          \
-  V(UniqueName)                    \
-  V(String)                        \
-  V(SeqString)                     \
-  V(ExternalString)                \
-  V(ConsString)                    \
-  V(SlicedString)                  \
-  V(ExternalTwoByteString)         \
-  V(ExternalOneByteString)         \
-  V(SeqTwoByteString)              \
-  V(SeqOneByteString)              \
-  V(InternalizedString)            \
-  V(Symbol)                        \
-                                   \
-  V(FixedTypedArrayBase)           \
-  V(FixedUint8Array)               \
-  V(FixedInt8Array)                \
-  V(FixedUint16Array)              \
-  V(FixedInt16Array)               \
-  V(FixedUint32Array)              \
-  V(FixedInt32Array)               \
-  V(FixedFloat32Array)             \
-  V(FixedFloat64Array)             \
-  V(FixedUint8ClampedArray)        \
-  V(ByteArray)                     \
-  V(BytecodeArray)                 \
-  V(FreeSpace)                     \
-  V(JSReceiver)                    \
-  V(JSObject)                      \
-  V(JSContextExtensionObject)      \
-  V(JSGeneratorObject)             \
-  V(JSModule)                      \
-  V(Map)                           \
-  V(DescriptorArray)               \
-  V(TransitionArray)               \
-  V(LiteralsArray)                 \
-  V(TypeFeedbackMetadata)          \
-  V(TypeFeedbackVector)            \
-  V(DeoptimizationInputData)       \
-  V(DeoptimizationOutputData)      \
-  V(DependentCode)                 \
-  V(HandlerTable)                  \
-  V(FixedArray)                    \
-  V(FixedDoubleArray)              \
-  V(WeakFixedArray)                \
-  V(ArrayList)                     \
-  V(Context)                       \
-  V(ScriptContextTable)            \
-  V(NativeContext)                 \
-  V(ScopeInfo)                     \
-  V(JSBoundFunction)               \
-  V(JSFunction)                    \
-  V(Code)                          \
-  V(AbstractCode)                  \
-  V(Oddball)                       \
-  V(SharedFunctionInfo)            \
-  V(JSValue)                       \
-  V(JSDate)                        \
-  V(JSMessageObject)               \
-  V(StringWrapper)                 \
-  V(Foreign)                       \
-  V(Boolean)                       \
-  V(JSArray)                       \
-  V(JSArrayBuffer)                 \
-  V(JSArrayBufferView)             \
-  V(JSTypedArray)                  \
-  V(JSDataView)                    \
-  V(JSProxy)                       \
-  V(JSSet)                         \
-  V(JSMap)                         \
-  V(JSSetIterator)                 \
-  V(JSMapIterator)                 \
-  V(JSWeakCollection)              \
-  V(JSWeakMap)                     \
-  V(JSWeakSet)                     \
-  V(JSRegExp)                      \
-  V(HashTable)                     \
-  V(Dictionary)                    \
-  V(StringTable)                   \
-  V(StringSet)                     \
-  V(NormalizedMapCache)            \
-  V(CompilationCacheTable)         \
-  V(CodeCacheHashTable)            \
-  V(MapCache)                      \
-  V(JSGlobalObject)                \
-  V(JSGlobalProxy)                 \
-  V(Undetectable)                  \
-  V(AccessCheckNeeded)             \
-  V(Callable)                      \
-  V(Function)                      \
-  V(Constructor)                   \
-  V(TemplateInfo)                  \
-  V(Filler)                        \
-  V(FixedArrayBase)                \
-  V(External)                      \
-  V(Struct)                        \
-  V(Cell)                          \
-  V(PropertyCell)                  \
-  V(WeakCell)                      \
-  V(ObjectHashTable)               \
-  V(WeakHashTable)                 \
+#define HEAP_OBJECT_TYPE_LIST(V) \
+  V(HeapNumber)                  \
+  V(MutableHeapNumber)           \
+  V(Simd128Value)                \
+  V(Float32x4)                   \
+  V(Int32x4)                     \
+  V(Uint32x4)                    \
+  V(Bool32x4)                    \
+  V(Int16x8)                     \
+  V(Uint16x8)                    \
+  V(Bool16x8)                    \
+  V(Int8x16)                     \
+  V(Uint8x16)                    \
+  V(Bool8x16)                    \
+  V(Name)                        \
+  V(UniqueName)                  \
+  V(String)                      \
+  V(SeqString)                   \
+  V(ExternalString)              \
+  V(ConsString)                  \
+  V(SlicedString)                \
+  V(ExternalTwoByteString)       \
+  V(ExternalOneByteString)       \
+  V(SeqTwoByteString)            \
+  V(SeqOneByteString)            \
+  V(InternalizedString)          \
+  V(Symbol)                      \
+                                 \
+  V(FixedTypedArrayBase)         \
+  V(FixedUint8Array)             \
+  V(FixedInt8Array)              \
+  V(FixedUint16Array)            \
+  V(FixedInt16Array)             \
+  V(FixedUint32Array)            \
+  V(FixedInt32Array)             \
+  V(FixedFloat32Array)           \
+  V(FixedFloat64Array)           \
+  V(FixedUint8ClampedArray)      \
+  V(ByteArray)                   \
+  V(BytecodeArray)               \
+  V(FreeSpace)                   \
+  V(JSReceiver)                  \
+  V(JSObject)                    \
+  V(JSContextExtensionObject)    \
+  V(JSGeneratorObject)           \
+  V(JSModule)                    \
+  V(Map)                         \
+  V(DescriptorArray)             \
+  V(TransitionArray)             \
+  V(LiteralsArray)               \
+  V(TypeFeedbackMetadata)        \
+  V(TypeFeedbackVector)          \
+  V(DeoptimizationInputData)     \
+  V(DeoptimizationOutputData)    \
+  V(DependentCode)               \
+  V(HandlerTable)                \
+  V(FixedArray)                  \
+  V(FixedDoubleArray)            \
+  V(WeakFixedArray)              \
+  V(ArrayList)                   \
+  V(Context)                     \
+  V(ScriptContextTable)          \
+  V(NativeContext)               \
+  V(ScopeInfo)                   \
+  V(JSBoundFunction)             \
+  V(JSFunction)                  \
+  V(Code)                        \
+  V(AbstractCode)                \
+  V(Oddball)                     \
+  V(SharedFunctionInfo)          \
+  V(JSValue)                     \
+  V(JSDate)                      \
+  V(JSMessageObject)             \
+  V(StringWrapper)               \
+  V(Foreign)                     \
+  V(Boolean)                     \
+  V(JSArray)                     \
+  V(JSArrayBuffer)               \
+  V(JSArrayBufferView)           \
+  V(JSTypedArray)                \
+  V(JSDataView)                  \
+  V(JSProxy)                     \
+  V(JSError)                     \
+  V(JSPromise)                   \
+  V(JSSet)                       \
+  V(JSMap)                       \
+  V(JSSetIterator)               \
+  V(JSMapIterator)               \
+  V(JSWeakCollection)            \
+  V(JSWeakMap)                   \
+  V(JSWeakSet)                   \
+  V(JSRegExp)                    \
+  V(HashTable)                   \
+  V(Dictionary)                  \
+  V(StringTable)                 \
+  V(StringSet)                   \
+  V(NormalizedMapCache)          \
+  V(CompilationCacheTable)       \
+  V(CodeCacheHashTable)          \
+  V(MapCache)                    \
+  V(JSGlobalObject)              \
+  V(JSGlobalProxy)               \
+  V(Undetectable)                \
+  V(AccessCheckNeeded)           \
+  V(Callable)                    \
+  V(Function)                    \
+  V(Constructor)                 \
+  V(TemplateInfo)                \
+  V(Filler)                      \
+  V(FixedArrayBase)              \
+  V(External)                    \
+  V(Struct)                      \
+  V(Cell)                        \
+  V(PropertyCell)                \
+  V(WeakCell)                    \
+  V(ObjectHashTable)             \
+  V(WeakHashTable)               \
   V(OrderedHashTable)
 
-#define ODDBALL_LIST(V) \
-  V(Undefined)          \
-  V(Null)               \
-  V(TheHole)            \
-  V(Exception)          \
-  V(Uninitialized)      \
-  V(True)               \
-  V(False)              \
-  V(ArgumentsMarker)    \
-  V(OptimizedOut)       \
-  V(StaleRegister)
+#define ODDBALL_LIST(V)                 \
+  V(Undefined, undefined_value)         \
+  V(Null, null_value)                   \
+  V(TheHole, the_hole_value)            \
+  V(Exception, exception)               \
+  V(Uninitialized, uninitialized_value) \
+  V(True, true_value)                   \
+  V(False, false_value)                 \
+  V(ArgumentsMarker, arguments_marker)  \
+  V(OptimizedOut, optimized_out)        \
+  V(StaleRegister, stale_register)
 
 // The element types selection for CreateListFromArrayLike.
 enum class ElementTypes { kAll, kStringAndSymbol };
@@ -1026,9 +1033,12 @@
   // Type testing.
   bool IsObject() const { return true; }
 
-#define IS_TYPE_FUNCTION_DECL(type_)  INLINE(bool Is##type_() const);
+#define IS_TYPE_FUNCTION_DECL(Type) INLINE(bool Is##Type() const);
   OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
   HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
+#undef IS_TYPE_FUNCTION_DECL
+#define IS_TYPE_FUNCTION_DECL(Type, Value) \
+  INLINE(bool Is##Type(Isolate* isolate) const);
   ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
 #undef IS_TYPE_FUNCTION_DECL
 
@@ -1072,7 +1082,6 @@
   INLINE(bool IsUnseededNumberDictionary() const);
   INLINE(bool IsOrderedHashSet() const);
   INLINE(bool IsOrderedHashMap() const);
-  static bool IsPromise(Handle<Object> object);
 
   // Extract the number.
   inline double Number() const;
@@ -1177,8 +1186,9 @@
   MUST_USE_RESULT static MaybeHandle<FixedArray> CreateListFromArrayLike(
       Isolate* isolate, Handle<Object> object, ElementTypes element_types);
 
-  // Check whether |object| is an instance of Error or NativeError.
-  static bool IsErrorObject(Isolate* isolate, Handle<Object> object);
+  // Get length property and apply ToLength.
+  MUST_USE_RESULT static MaybeHandle<Object> GetLengthFromArrayLike(
+      Isolate* isolate, Handle<Object> object);
 
   // ES6 section 12.5.6 The typeof Operator
   static Handle<String> TypeOf(Isolate* isolate, Handle<Object> object);
@@ -1317,15 +1327,10 @@
   // undefined if not yet created.
   Object* GetHash();
 
-  // Returns undefined for JSObjects, but returns the hash code for simple
-  // objects.  This avoids a double lookup in the cases where we know we will
-  // add the hash to the JSObject if it does not already exist.
-  Object* GetSimpleHash();
-
   // Returns the permanent hash code associated with this object depending on
   // the actual object type. May create and store a hash code if needed and none
   // exists.
-  static Handle<Smi> GetOrCreateHash(Isolate* isolate, Handle<Object> object);
+  static Smi* GetOrCreateHash(Isolate* isolate, Handle<Object> object);
 
   // Checks whether this object has the same value as the given one.  This
   // function is implemented according to ES5, section 9.12 and can be used
@@ -1546,10 +1551,15 @@
   // Convenience method to get current isolate.
   inline Isolate* GetIsolate() const;
 
-#define IS_TYPE_FUNCTION_DECL(type_) INLINE(bool Is##type_() const);
+#define IS_TYPE_FUNCTION_DECL(Type) INLINE(bool Is##Type() const);
   HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
+#undef IS_TYPE_FUNCTION_DECL
+
+#define IS_TYPE_FUNCTION_DECL(Type, Value) \
+  INLINE(bool Is##Type(Isolate* isolate) const);
   ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
 #undef IS_TYPE_FUNCTION_DECL
+
 #define DECLARE_STRUCT_PREDICATE(NAME, Name, name) \
   INLINE(bool Is##Name() const);
   STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
@@ -1794,10 +1804,13 @@
   ACCESSOR_SETTER
 };
 
+enum class GetKeysConversion { kKeepNumbers, kConvertToString };
 
-enum GetKeysConversion { KEEP_NUMBERS, CONVERT_TO_STRING };
-
-enum KeyCollectionType { OWN_ONLY, INCLUDE_PROTOS };
+enum class KeyCollectionMode {
+  kOwnOnly = static_cast<int>(v8::KeyCollectionMode::kOwnOnly),
+  kIncludePrototypes =
+      static_cast<int>(v8::KeyCollectionMode::kIncludePrototypes)
+};
 
 // JSReceiver includes types on which properties can be defined, i.e.,
 // JSObject and JSProxy.
@@ -1928,10 +1941,6 @@
   // Returns the class name ([[Class]] property in the specification).
   String* class_name();
 
-  // Returns the builtin string tag used in Object.prototype.toString.
-  MUST_USE_RESULT static MaybeHandle<String> BuiltinStringTag(
-      Handle<JSReceiver> object);
-
   // Returns the constructor name (the name (possibly, inferred name) of the
   // function that was used to instantiate the object).
   static Handle<String> GetConstructorName(Handle<JSReceiver> receiver);
@@ -1966,26 +1975,17 @@
 
   // Retrieves a permanent object identity hash code. The undefined value might
   // be returned in case no hash was created yet.
-  static inline Handle<Object> GetIdentityHash(Isolate* isolate,
-                                               Handle<JSReceiver> object);
+  static inline Object* GetIdentityHash(Isolate* isolate,
+                                        Handle<JSReceiver> object);
 
   // Retrieves a permanent object identity hash code. May create and store a
   // hash code if needed and none exists.
-  inline static Handle<Smi> GetOrCreateIdentityHash(
-      Handle<JSReceiver> object);
+  inline static Smi* GetOrCreateIdentityHash(Isolate* isolate,
+                                             Handle<JSReceiver> object);
 
   // ES6 [[OwnPropertyKeys]] (modulo return type)
-  MUST_USE_RESULT static MaybeHandle<FixedArray> OwnPropertyKeys(
-      Handle<JSReceiver> object) {
-    return GetKeys(object, OWN_ONLY, ALL_PROPERTIES, CONVERT_TO_STRING);
-  }
-
-  // Computes the enumerable keys for a JSObject. Used for implementing
-  // "for (n in object) { }".
-  MUST_USE_RESULT static MaybeHandle<FixedArray> GetKeys(
-      Handle<JSReceiver> object, KeyCollectionType type, PropertyFilter filter,
-      GetKeysConversion keys_conversion = KEEP_NUMBERS,
-      bool filter_proxy_keys_ = true);
+  MUST_USE_RESULT static inline MaybeHandle<FixedArray> OwnPropertyKeys(
+      Handle<JSReceiver> object);
 
   MUST_USE_RESULT static MaybeHandle<FixedArray> GetOwnValues(
       Handle<JSReceiver> object, PropertyFilter filter);
@@ -2173,6 +2173,8 @@
   static void OptimizeAsPrototype(Handle<JSObject> object,
                                   PrototypeOptimizationMode mode);
   static void ReoptimizeIfPrototype(Handle<JSObject> object);
+  static void MakePrototypesFast(Handle<Object> receiver,
+                                 WhereToStart where_to_start, Isolate* isolate);
   static void LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate);
   static void UpdatePrototypeUserRegistration(Handle<Map> old_map,
                                               Handle<Map> new_map,
@@ -2491,10 +2493,10 @@
                                     ElementsKind kind,
                                     Object* object);
 
-  static Handle<Object> GetIdentityHash(Isolate* isolate,
-                                        Handle<JSObject> object);
+  static Object* GetIdentityHash(Isolate* isolate, Handle<JSObject> object);
 
-  static Handle<Smi> GetOrCreateIdentityHash(Handle<JSObject> object);
+  static Smi* GetOrCreateIdentityHash(Isolate* isolate,
+                                      Handle<JSObject> object);
 
   // Helper for fast versions of preventExtensions, seal, and freeze.
   // attrs is one of NONE, SEALED, or FROZEN (depending on the operation).
@@ -3152,7 +3154,7 @@
   // Tells whether k is a real key.  The hole and undefined are not allowed
   // as keys and can be used to indicate missing or deleted elements.
   inline bool IsKey(Object* k);
-  inline bool IsKey(Heap* heap, Object* k);
+  inline bool IsKey(Isolate* isolate, Object* k);
 
   // Compute the probe offset (quadratic probing).
   INLINE(static uint32_t GetProbeOffset(uint32_t n)) {
@@ -3194,6 +3196,8 @@
 template <typename Derived, typename Shape, typename Key>
 class HashTable : public HashTableBase {
  public:
+  typedef Shape ShapeT;
+
   // Wrapper methods
   inline uint32_t Hash(Key key) {
     if (Shape::UsesSeed) {
@@ -3232,10 +3236,12 @@
   void Rehash(Key key);
 
   // Returns the key at entry.
-  Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
+  Object* KeyAt(int entry) { return get(EntryToIndex(entry) + kEntryKeyIndex); }
 
   static const int kElementsStartIndex = kPrefixStartIndex + Shape::kPrefixSize;
   static const int kEntrySize = Shape::kEntrySize;
+  STATIC_ASSERT(kEntrySize > 0);
+  static const int kEntryKeyIndex = 0;
   static const int kElementsStartOffset =
       kHeaderSize + kElementsStartIndex * kPointerSize;
   static const int kCapacityOffset =
@@ -3264,7 +3270,7 @@
       PretenureFlag pretenure = NOT_TENURED);
 
   // Returns true if this table has sufficient capacity for adding n elements.
-  bool HasSufficientCapacity(int n);
+  bool HasSufficientCapacityToAdd(int number_of_additional_elements);
 
   // Sets the capacity of the hash table.
   void SetCapacity(int capacity) {
@@ -3480,6 +3486,9 @@
   static Handle<Derived> EnsureCapacity(Handle<Derived> obj, int n, Key key);
 
 #ifdef OBJECT_PRINT
+  // For our gdb macros, we should perhaps change these in the future.
+  void Print();
+
   void Print(std::ostream& os);  // NOLINT
 #endif
   // Returns the key (slow).
@@ -3547,15 +3556,16 @@
   static inline PropertyDetails DetailsAt(Dictionary* dict, int entry) {
     STATIC_ASSERT(Dictionary::kEntrySize == 3);
     DCHECK(entry >= 0);  // Not found is -1, which is not caught by get().
-    return PropertyDetails(
-        Smi::cast(dict->get(Dictionary::EntryToIndex(entry) + 2)));
+    return PropertyDetails(Smi::cast(dict->get(
+        Dictionary::EntryToIndex(entry) + Dictionary::kEntryDetailsIndex)));
   }
 
   template <typename Dictionary>
   static inline void DetailsAtPut(Dictionary* dict, int entry,
                                   PropertyDetails value) {
     STATIC_ASSERT(Dictionary::kEntrySize == 3);
-    dict->set(Dictionary::EntryToIndex(entry) + 2, value.AsSmi());
+    dict->set(Dictionary::EntryToIndex(entry) + Dictionary::kEntryDetailsIndex,
+              value.AsSmi());
   }
 
   template <typename Dictionary>
@@ -3577,6 +3587,8 @@
   static inline Handle<Object> AsHandle(Isolate* isolate, Handle<Name> key);
   static const int kPrefixSize = 2;
   static const int kEntrySize = 3;
+  static const int kEntryValueIndex = 1;
+  static const int kEntryDetailsIndex = 2;
   static const bool kIsEnumerable = true;
 };
 
@@ -3591,6 +3603,9 @@
 
   inline static Handle<FixedArray> DoGenerateNewEnumerationIndices(
       Handle<NameDictionary> dictionary);
+
+  static const int kEntryValueIndex = 1;
+  static const int kEntryDetailsIndex = 2;
 };
 
 
@@ -3618,6 +3633,8 @@
     : public NameDictionaryBase<GlobalDictionary, GlobalDictionaryShape> {
  public:
   DECLARE_CAST(GlobalDictionary)
+
+  static const int kEntryValueIndex = 1;
 };
 
 
@@ -3691,6 +3708,9 @@
   // requires_slow_elements returns false.
   inline uint32_t max_number_key();
 
+  static const int kEntryValueIndex = 1;
+  static const int kEntryDetailsIndex = 2;
+
   // Bit masks.
   static const int kRequiresSlowElementsMask = 1;
   static const int kRequiresSlowElementsTagSize = 1;
@@ -3721,6 +3741,9 @@
       Handle<UnseededNumberDictionary> dictionary,
       uint32_t key,
       Handle<Object> value);
+
+  static const int kEntryValueIndex = 1;
+  static const int kEntryDetailsIndex = 2;
 };
 
 
@@ -3872,10 +3895,10 @@
     return Smi::cast(entry)->value();
   }
 
-  int KeyToFirstEntry(Object* key) {
+  int KeyToFirstEntry(Isolate* isolate, Object* key) {
     Object* hash = key->GetHash();
     // If the object does not have an identity hash, it was never used as a key
-    if (hash->IsUndefined()) return kNotFound;
+    if (hash->IsUndefined(isolate)) return kNotFound;
     return HashToEntry(Smi::cast(hash)->value());
   }
 
@@ -3884,7 +3907,7 @@
     return Smi::cast(next_entry)->value();
   }
 
-  // use KeyAt(i)->IsTheHole() to determine if this is a deleted entry.
+  // use KeyAt(i)->IsTheHole(isolate) to determine if this is a deleted entry.
   Object* KeyAt(int entry) {
     DCHECK_LT(entry, this->UsedCapacity());
     return get(EntryToIndex(entry));
@@ -3980,6 +4003,8 @@
 
   static Handle<OrderedHashSet> Add(Handle<OrderedHashSet> table,
                                     Handle<Object> value);
+  static Handle<FixedArray> ConvertToKeysArray(Handle<OrderedHashSet> table,
+                                               GetKeysConversion convert);
 };
 
 
@@ -4452,6 +4477,10 @@
 
   inline int instruction_size();
 
+  // Returns the size of bytecode and its metadata. This includes the size of
+  // bytecode, constant pool, source position table, and handler table.
+  inline int SizeIncludingMetadata();
+
   int SourcePosition(int offset);
   int SourceStatementPosition(int offset);
 
@@ -4739,17 +4768,18 @@
  public:
   static const int kVectorIndex = 0;
   static const int kFirstLiteralIndex = 1;
-  static const int kOffsetToFirstLiteral =
-      FixedArray::kHeaderSize + kPointerSize;
+  static const int kFeedbackVectorOffset;
+  static const int kOffsetToFirstLiteral;
 
   static int OffsetOfLiteralAt(int index) {
-    return SizeFor(index + kFirstLiteralIndex);
+    return OffsetOfElementAt(index + kFirstLiteralIndex);
   }
 
   inline TypeFeedbackVector* feedback_vector() const;
   inline void set_feedback_vector(TypeFeedbackVector* vector);
   inline Object* literal(int literal_index) const;
   inline void set_literal(int literal_index, Object* literal);
+  inline void set_literal_undefined(int literal_index);
   inline int literals_count() const;
 
   static Handle<LiteralsArray> New(Isolate* isolate,
@@ -4862,6 +4892,7 @@
 
 #define IC_KIND_LIST(V) \
   V(LOAD_IC)            \
+  V(LOAD_GLOBAL_IC)     \
   V(KEYED_LOAD_IC)      \
   V(CALL_IC)            \
   V(STORE_IC)           \
@@ -4881,10 +4912,6 @@
     NUMBER_OF_KINDS
   };
 
-  // No more than 32 kinds. The value is currently encoded in five bits in
-  // Flags.
-  STATIC_ASSERT(NUMBER_OF_KINDS <= 32);
-
   static const char* Kind2String(Kind kind);
 
   static const int kPrologueOffsetNotSet = -1;
@@ -4958,7 +4985,6 @@
 
   // [flags]: Access to specific code flags.
   inline Kind kind();
-  inline InlineCacheState ic_state();  // Only valid for IC stubs.
   inline ExtraICState extra_ic_state();  // Only valid for IC stubs.
 
   // Testers for IC stub kinds.
@@ -4971,17 +4997,14 @@
   inline bool is_to_boolean_ic_stub();
   inline bool is_optimized_code();
   inline bool is_wasm_code();
-  inline bool embeds_maps_weakly();
 
   inline bool IsCodeStubOrIC();
-  inline bool IsJavaScriptCode();
 
   inline void set_raw_kind_specific_flags1(int value);
   inline void set_raw_kind_specific_flags2(int value);
 
   // Testers for interpreter builtins.
-  inline bool is_interpreter_entry_trampoline();
-  inline bool is_interpreter_enter_bytecode_dispatch();
+  inline bool is_interpreter_trampoline_builtin();
 
   // [is_crankshafted]: For kind STUB or ICs, tells whether or not a code
   // object was generated by either the hydrogen or the TurboFan optimizing
@@ -5096,18 +5119,12 @@
 
   // Flags operations.
   static inline Flags ComputeFlags(
-      Kind kind, InlineCacheState ic_state = UNINITIALIZED,
-      ExtraICState extra_ic_state = kNoExtraICState,
-      CacheHolderFlag holder = kCacheOnReceiver);
-
-  static inline Flags ComputeMonomorphicFlags(
       Kind kind, ExtraICState extra_ic_state = kNoExtraICState,
       CacheHolderFlag holder = kCacheOnReceiver);
 
   static inline Flags ComputeHandlerFlags(
       Kind handler_kind, CacheHolderFlag holder = kCacheOnReceiver);
 
-  static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
   static inline CacheHolderFlag ExtractCacheHolderFromFlags(Flags flags);
   static inline Kind ExtractKindFromFlags(Flags flags);
   static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags);
@@ -5126,12 +5143,60 @@
   // Returns the address right after the last instruction.
   inline byte* instruction_end();
 
-  // Returns the size of the instructions, padding, and relocation information.
+  // Returns the size of the instructions, padding, relocation and unwinding
+  // information.
   inline int body_size();
 
+  // Returns the size of code and its metadata. This includes the size of code
+  // relocation information, deoptimization data and handler table.
+  inline int SizeIncludingMetadata();
+
   // Returns the address of the first relocation info (read backwards!).
   inline byte* relocation_start();
 
+  // [has_unwinding_info]: Whether this code object has unwinding information.
+  // If it doesn't, unwinding_info_start() will point to invalid data.
+  //
+  // The body of all code objects has the following layout.
+  //
+  //  +--------------------------+  <-- instruction_start()
+  //  |       instructions       |
+  //  |           ...            |
+  //  +--------------------------+
+  //  |      relocation info     |
+  //  |           ...            |
+  //  +--------------------------+  <-- instruction_end()
+  //
+  // If has_unwinding_info() is false, instruction_end() points to the first
+  // memory location after the end of the code object. Otherwise, the body
+  // continues as follows:
+  //
+  //  +--------------------------+
+  //  |    padding to the next   |
+  //  |  8-byte aligned address  |
+  //  +--------------------------+  <-- instruction_end()
+  //  |   [unwinding_info_size]  |
+  //  |        as uint64_t       |
+  //  +--------------------------+  <-- unwinding_info_start()
+  //  |       unwinding info     |
+  //  |            ...           |
+  //  +--------------------------+  <-- unwinding_info_end()
+  //
+  // and unwinding_info_end() points to the first memory location after the end
+  // of the code object.
+  //
+  DECL_BOOLEAN_ACCESSORS(has_unwinding_info)
+
+  // [unwinding_info_size]: Size of the unwinding information.
+  inline int unwinding_info_size() const;
+  inline void set_unwinding_info_size(int value);
+
+  // Returns the address of the unwinding information, if any.
+  inline byte* unwinding_info_start();
+
+  // Returns the address right after the end of the unwinding information.
+  inline byte* unwinding_info_end();
+
   // Code entry point.
   inline byte* entry();
 
@@ -5262,6 +5327,8 @@
   static const int kHeaderSize =
       (kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask;
 
+  inline int GetUnwindingInfoSizeOffset() const;
+
   class BodyDescriptor;
 
   // Byte offsets within kKindSpecificFlags1Offset.
@@ -5275,12 +5342,16 @@
   class ProfilerTicksField : public BitField<int, 4, 28> {};
 
   // Flags layout.  BitField<type, shift, size>.
-  class ICStateField : public BitField<InlineCacheState, 0, 3> {};
-  class CacheHolderField : public BitField<CacheHolderFlag, 3, 2> {};
-  class KindField : public BitField<Kind, 5, 5> {};
-  class ExtraICStateField
-      : public BitField<ExtraICState, 10, PlatformSmiTagging::kSmiValueSize -
-                                              10 + 1> {};  // NOLINT
+  class ICStateField : public BitField<InlineCacheState, 0, 2> {};
+  class HasUnwindingInfoField : public BitField<bool, ICStateField::kNext, 1> {
+  };
+  class CacheHolderField
+      : public BitField<CacheHolderFlag, HasUnwindingInfoField::kNext, 2> {};
+  class KindField : public BitField<Kind, CacheHolderField::kNext, 5> {};
+  STATIC_ASSERT(NUMBER_OF_KINDS <= KindField::kMax);
+  class ExtraICStateField : public BitField<ExtraICState, KindField::kNext,
+                                            PlatformSmiTagging::kSmiValueSize -
+                                                KindField::kNext + 1> {};
 
   // KindSpecificFlags1 layout (STUB, BUILTIN and OPTIMIZED_FUNCTION)
   static const int kStackSlotsFirstBit = 0;
@@ -5360,8 +5431,11 @@
     CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
 #undef DEFINE_CODE_KIND_ENUM
         INTERPRETED_FUNCTION,
+    NUMBER_OF_KINDS
   };
 
+  static const char* Kind2String(Kind kind);
+
   int SourcePosition(int offset);
   int SourceStatementPosition(int offset);
 
@@ -5371,9 +5445,12 @@
   // Returns the address right after the last instruction.
   inline Address instruction_end();
 
-  // Returns the of the code instructions.
+  // Returns the size of the code instructions.
   inline int instruction_size();
 
+  // Returns the size of instructions and the metadata.
+  inline int SizeIncludingMetadata();
+
   // Returns true if pc is inside this object's instructions.
   inline bool contains(byte* pc);
 
@@ -5701,6 +5778,7 @@
   inline bool has_fast_double_elements();
   inline bool has_fast_elements();
   inline bool has_sloppy_arguments_elements();
+  inline bool has_fast_sloppy_arguments_elements();
   inline bool has_fast_string_wrapper_elements();
   inline bool has_fixed_typed_array_elements();
   inline bool has_dictionary_elements();
@@ -5729,6 +5807,9 @@
       Handle<JSObject> prototype, Isolate* isolate);
   static Handle<PrototypeInfo> GetOrCreatePrototypeInfo(
       Handle<Map> prototype_map, Isolate* isolate);
+  inline bool should_be_fast_prototype_map() const;
+  static void SetShouldBeFastPrototypeMap(Handle<Map> map, bool value,
+                                          Isolate* isolate);
 
   // [prototype chain validity cell]: Associated with a prototype object,
   // stored in that object's map's PrototypeInfo, indicates that prototype
@@ -5831,7 +5912,7 @@
                                     LayoutDescriptor* layout_descriptor);
 
   // [stub cache]: contains stubs compiled for this map.
-  DECL_ACCESSORS(code_cache, Object)
+  DECL_ACCESSORS(code_cache, FixedArray)
 
   // [dependent code]: list of optimized codes that weakly embed this map.
   DECL_ACCESSORS(dependent_code, DependentCode)
@@ -6285,6 +6366,13 @@
   // [prototype_users]: WeakFixedArray containing maps using this prototype,
   // or Smi(0) if uninitialized.
   DECL_ACCESSORS(prototype_users, Object)
+
+  // [object_create_map]: A field caching the map for Object.create(prototype).
+  static inline void SetObjectCreateMap(Handle<PrototypeInfo> info,
+                                        Handle<Map> map);
+  inline Map* ObjectCreateMap();
+  inline bool HasObjectCreateMap();
+
   // [registry_slot]: Slot in prototype's user registry where this user
   // is stored. Returns UNREGISTERED if this prototype has not been registered.
   inline int registry_slot() const;
@@ -6296,6 +6384,11 @@
   // given receiver embed the currently valid cell for that receiver's prototype
   // during their compilation and check it on execution.
   DECL_ACCESSORS(validity_cell, Object)
+  // [bit_field]
+  inline int bit_field() const;
+  inline void set_bit_field(int bit_field);
+
+  DECL_BOOLEAN_ACCESSORS(should_be_fast_map)
 
   DECLARE_CAST(PrototypeInfo)
 
@@ -6306,10 +6399,16 @@
   static const int kPrototypeUsersOffset = HeapObject::kHeaderSize;
   static const int kRegistrySlotOffset = kPrototypeUsersOffset + kPointerSize;
   static const int kValidityCellOffset = kRegistrySlotOffset + kPointerSize;
-  static const int kConstructorNameOffset = kValidityCellOffset + kPointerSize;
-  static const int kSize = kConstructorNameOffset + kPointerSize;
+  static const int kObjectCreateMap = kValidityCellOffset + kPointerSize;
+  static const int kBitFieldOffset = kObjectCreateMap + kPointerSize;
+  static const int kSize = kBitFieldOffset + kPointerSize;
+
+  // Bit field usage.
+  static const int kShouldBeFastBit = 0;
 
  private:
+  DECL_ACCESSORS(object_create_map, Object)
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(PrototypeInfo);
 };
 
@@ -6439,14 +6538,6 @@
   // resource is accessible. Otherwise, always return true.
   inline bool HasValidSource();
 
-  // Convert code offset into column number.
-  static int GetColumnNumber(Handle<Script> script, int code_offset);
-
-  // Convert code offset into (zero-based) line number.
-  // The non-handlified version does not allocate, but may be much slower.
-  static int GetLineNumber(Handle<Script> script, int code_offset);
-  int GetLineNumber(int code_pos);
-
   static Handle<Object> GetNameOrSourceURL(Handle<Script> script);
 
   // Set eval origin for stack trace formatting.
@@ -6459,6 +6550,33 @@
   // Init line_ends array with source code positions of line ends.
   static void InitLineEnds(Handle<Script> script);
 
+  // Convert code offset into column number.
+  static int GetColumnNumber(Handle<Script> script, int code_offset);
+
+  // Convert code offset into (zero-based) line number.
+  // The non-handlified version does not allocate, but may be much slower.
+  static int GetLineNumber(Handle<Script> script, int code_offset);
+  int GetLineNumber(int code_pos);
+
+  // Carries information about a source position.
+  struct PositionInfo {
+    PositionInfo() : line(-1), column(-1), line_start(-1), line_end(-1) {}
+
+    int line;        // Zero-based line number.
+    int column;      // Zero-based column number.
+    int line_start;  // Position of first character in line.
+    int line_end;    // Position of last (non-linebreak) character in line.
+  };
+
+  // Specifies whether to add offsets to position infos.
+  enum OffsetFlag { NO_OFFSET = 0, WITH_OFFSET = 1 };
+
+  // Retrieves information about the given position, optionally with an offset.
+  // Returns false on failure, and otherwise writes into the given info object
+  // on success.
+  bool GetPositionInfo(int position, PositionInfo* info,
+                       OffsetFlag offset_flag);
+
   // Get the JS object wrapping the given script; create it if none exists.
   static Handle<JSObject> GetWrapper(Handle<Script> script);
 
@@ -6546,7 +6664,12 @@
   V(Math, ceil, MathCeil)                                   \
   V(Math, abs, MathAbs)                                     \
   V(Math, log, MathLog)                                     \
+  V(Math, log1p, MathLog1p)                                 \
+  V(Math, log2, MathLog2)                                   \
+  V(Math, log10, MathLog10)                                 \
+  V(Math, cbrt, MathCbrt)                                   \
   V(Math, exp, MathExp)                                     \
+  V(Math, expm1, MathExpm1)                                 \
   V(Math, sqrt, MathSqrt)                                   \
   V(Math, pow, MathPow)                                     \
   V(Math, max, MathMax)                                     \
@@ -6558,6 +6681,7 @@
   V(Math, asin, MathAsin)                                   \
   V(Math, atan, MathAtan)                                   \
   V(Math, atan2, MathAtan2)                                 \
+  V(Math, atanh, MathAtanh)                                 \
   V(Math, imul, MathImul)                                   \
   V(Math, clz32, MathClz32)                                 \
   V(Math, fround, MathFround)                               \
@@ -6576,7 +6700,14 @@
 #undef DECLARE_FUNCTION_ID
   // Fake id for a special case of Math.pow. Note, it continues the
   // list of math functions.
-  kMathPowHalf
+  kMathPowHalf,
+  // These are manually assigned to special getters during bootstrapping.
+  kDataViewBuffer,
+  kDataViewByteLength,
+  kDataViewByteOffset,
+  kTypedArrayByteLength,
+  kTypedArrayByteOffset,
+  kTypedArrayLength,
 };
 
 
@@ -6618,6 +6749,9 @@
   // Clear optimized code map.
   void ClearOptimizedCodeMap();
 
+  // Like ClearOptimizedCodeMap, but preserves literals.
+  void ClearCodeFromOptimizedCodeMap();
+
   // We have a special root FixedArray with the right shape and values
   // to represent the cleared optimized code map. This predicate checks
   // if that root is installed.
@@ -6631,6 +6765,9 @@
   // Trims the optimized code map after entries have been removed.
   void TrimOptimizedCodeMap(int shrink_by);
 
+  static Handle<LiteralsArray> FindOrCreateLiterals(
+      Handle<SharedFunctionInfo> shared, Handle<Context> native_context);
+
   // Add or update entry in the optimized code map for context-independent code.
   static void AddSharedCodeToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
                                               Handle<Code> code);
@@ -6700,16 +6837,10 @@
   inline int expected_nof_properties() const;
   inline void set_expected_nof_properties(int value);
 
-  // [feedback_vector] - accumulates ast node feedback from full-codegen and
+  // [feedback_metadata] - describes ast node feedback from full-codegen and
   // (increasingly) from crankshafted code where sufficient feedback isn't
   // available.
-  DECL_ACCESSORS(feedback_vector, TypeFeedbackVector)
-
-  // Unconditionally clear the type feedback vector (including vector ICs).
-  void ClearTypeFeedbackInfo();
-
-  // Clear the type feedback vector with a more subtle policy at GC time.
-  void ClearTypeFeedbackInfoAtGCTime();
+  DECL_ACCESSORS(feedback_metadata, TypeFeedbackMetadata)
 
 #if TRACE_MAPS
   // [unique_id] - For --trace-maps purposes, an identifier that's persistent
@@ -7028,15 +7159,15 @@
   static const int kScriptOffset = kFunctionDataOffset + kPointerSize;
   static const int kDebugInfoOffset = kScriptOffset + kPointerSize;
   static const int kFunctionIdentifierOffset = kDebugInfoOffset + kPointerSize;
-  static const int kFeedbackVectorOffset =
+  static const int kFeedbackMetadataOffset =
       kFunctionIdentifierOffset + kPointerSize;
 #if TRACE_MAPS
-  static const int kUniqueIdOffset = kFeedbackVectorOffset + kPointerSize;
+  static const int kUniqueIdOffset = kFeedbackMetadataOffset + kPointerSize;
   static const int kLastPointerFieldOffset = kUniqueIdOffset;
 #else
   // Just to not break the postmortrem support with conditional offsets
-  static const int kUniqueIdOffset = kFeedbackVectorOffset;
-  static const int kLastPointerFieldOffset = kFeedbackVectorOffset;
+  static const int kUniqueIdOffset = kFeedbackMetadataOffset;
+  static const int kLastPointerFieldOffset = kFeedbackMetadataOffset;
 #endif
 
 #if V8_HOST_ARCH_32_BIT
@@ -7308,23 +7439,32 @@
   // [receiver]: The receiver of the suspended computation.
   DECL_ACCESSORS(receiver, Object)
 
-  // [input]: The most recent input value.
-  DECL_ACCESSORS(input, Object)
+  // [input_or_debug_pos]
+  // For executing generators: the most recent input value.
+  // For suspended new-style generators: debug information (bytecode offset).
+  // For suspended old-style generators: unused.
+  // There is currently no need to remember the most recent input value for a
+  // suspended generator.
+  DECL_ACCESSORS(input_or_debug_pos, Object)
 
   // [resume_mode]: The most recent resume mode.
   enum ResumeMode { kNext, kReturn, kThrow };
   DECL_INT_ACCESSORS(resume_mode)
 
-  // [continuation]: Offset into code of continuation.
+  // [continuation]
   //
-  // A positive offset indicates a suspended generator.  The special
+  // A positive value indicates a suspended generator.  The special
   // kGeneratorExecuting and kGeneratorClosed values indicate that a generator
   // cannot be resumed.
   inline int continuation() const;
   inline void set_continuation(int continuation);
-  inline bool is_closed();
-  inline bool is_executing();
-  inline bool is_suspended();
+  inline bool is_closed() const;
+  inline bool is_executing() const;
+  inline bool is_suspended() const;
+
+  // For suspended generators: the source position at which the generator
+  // is suspended.
+  int source_position() const;
 
   // [operand_stack]: Saved operand stack.
   DECL_ACCESSORS(operand_stack, FixedArray)
@@ -7342,8 +7482,8 @@
   static const int kFunctionOffset = JSObject::kHeaderSize;
   static const int kContextOffset = kFunctionOffset + kPointerSize;
   static const int kReceiverOffset = kContextOffset + kPointerSize;
-  static const int kInputOffset = kReceiverOffset + kPointerSize;
-  static const int kResumeModeOffset = kInputOffset + kPointerSize;
+  static const int kInputOrDebugPosOffset = kReceiverOffset + kPointerSize;
+  static const int kResumeModeOffset = kInputOrDebugPosOffset + kPointerSize;
   static const int kContinuationOffset = kResumeModeOffset + kPointerSize;
   static const int kOperandStackOffset = kContinuationOffset + kPointerSize;
   static const int kSize = kOperandStackOffset + kPointerSize;
@@ -7428,6 +7568,9 @@
   // can be shared by instances.
   DECL_ACCESSORS(shared, SharedFunctionInfo)
 
+  static const int kLengthDescriptorIndex = 0;
+  static const int kNameDescriptorIndex = 1;
+
   // [context]: The context for this function.
   inline Context* context();
   inline void set_context(Object* context);
@@ -7486,6 +7629,15 @@
   // access to.
   DECL_ACCESSORS(literals, LiteralsArray)
 
+  static void EnsureLiterals(Handle<JSFunction> function);
+  inline TypeFeedbackVector* feedback_vector();
+
+  // Unconditionally clear the type feedback vector (including vector ICs).
+  void ClearTypeFeedbackInfo();
+
+  // Clear the type feedback vector with a more subtle policy at GC time.
+  void ClearTypeFeedbackInfoAtGCTime();
+
   // The initial map for an object created by this constructor.
   inline Map* initial_map();
   static void SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
@@ -7569,9 +7721,6 @@
   DECLARE_PRINTER(JSFunction)
   DECLARE_VERIFIER(JSFunction)
 
-  // Returns the number of allocated literals.
-  inline int NumberOfLiterals();
-
   // The function's name if it is configured, otherwise shared function info
   // debug name.
   static Handle<String> GetName(Handle<JSFunction> function);
@@ -7876,7 +8025,6 @@
   // NOT_COMPILED: Initial value. No data has been stored in the JSRegExp yet.
   // ATOM: A simple string to match against using an indexOf operation.
   // IRREGEXP: Compiled with Irregexp.
-  // IRREGEXP_NATIVE: Compiled to native code with Irregexp.
   enum Type { NOT_COMPILED, ATOM, IRREGEXP };
   enum Flag {
     kNone = 0,
@@ -7969,8 +8117,11 @@
   static const int kIrregexpMaxRegisterCountIndex = kDataIndex + 4;
   // Number of captures in the compiled regexp.
   static const int kIrregexpCaptureCountIndex = kDataIndex + 5;
+  // Maps names of named capture groups (at indices 2i) to their corresponding
+  // capture group indices (at indices 2i + 1).
+  static const int kIrregexpCaptureNameMapIndex = kDataIndex + 6;
 
-  static const int kIrregexpDataSize = kIrregexpCaptureCountIndex + 1;
+  static const int kIrregexpDataSize = kIrregexpCaptureNameMapIndex + 1;
 
   // Offsets directly into the data fixed array.
   static const int kDataTagOffset =
@@ -8105,9 +8256,6 @@
 
   DECLARE_CAST(CodeCacheHashTable)
 
-  // Initial size of the fixed array backing the hash table.
-  static const int kInitialSize = 16;
-
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(CodeCacheHashTable);
 };
@@ -8498,6 +8646,8 @@
   // Return a string version of this name that is converted according to the
   // rules described in ES6 section 9.2.11.
   MUST_USE_RESULT static MaybeHandle<String> ToFunctionName(Handle<Name> name);
+  MUST_USE_RESULT static MaybeHandle<String> ToFunctionName(
+      Handle<Name> name, Handle<String> prefix);
 
   DECLARE_CAST(Name)
 
@@ -8535,6 +8685,10 @@
   // Array index strings this short can keep their index in the hash field.
   static const int kMaxCachedArrayIndexLength = 7;
 
+  // Maximum number of characters to consider when trying to convert a string
+  // value into an array index.
+  static const int kMaxArrayIndexSize = 10;
+
   // For strings which are array indexes the hash value has the string length
   // mixed into the hash, mainly to avoid a hash value of zero which would be
   // the case for the string '0'. 24 bits are used for the array index value.
@@ -8542,7 +8696,8 @@
   static const int kArrayIndexLengthBits =
       kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields;
 
-  STATIC_ASSERT((kArrayIndexLengthBits > 0));
+  STATIC_ASSERT(kArrayIndexLengthBits > 0);
+  STATIC_ASSERT(kMaxArrayIndexSize < (1 << kArrayIndexLengthBits));
 
   class ArrayIndexValueBits : public BitField<unsigned int, kNofHashBitFields,
       kArrayIndexValueBits> {};  // NOLINT
@@ -8632,34 +8787,6 @@
  public:
   enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };
 
-  // Array index strings this short can keep their index in the hash field.
-  static const int kMaxCachedArrayIndexLength = 7;
-
-  // For strings which are array indexes the hash value has the string length
-  // mixed into the hash, mainly to avoid a hash value of zero which would be
-  // the case for the string '0'. 24 bits are used for the array index value.
-  static const int kArrayIndexValueBits = 24;
-  static const int kArrayIndexLengthBits =
-      kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields;
-
-  STATIC_ASSERT((kArrayIndexLengthBits > 0));
-
-  class ArrayIndexValueBits : public BitField<unsigned int, kNofHashBitFields,
-      kArrayIndexValueBits> {};  // NOLINT
-  class ArrayIndexLengthBits : public BitField<unsigned int,
-      kNofHashBitFields + kArrayIndexValueBits,
-      kArrayIndexLengthBits> {};  // NOLINT
-
-  // Check that kMaxCachedArrayIndexLength + 1 is a power of two so we
-  // could use a mask to test if the length of string is less than or equal to
-  // kMaxCachedArrayIndexLength.
-  STATIC_ASSERT(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));
-
-  static const unsigned int kContainsCachedArrayIndexMask =
-      (~static_cast<unsigned>(kMaxCachedArrayIndexLength)
-       << ArrayIndexLengthBits::kShift) |
-      kIsNotArrayIndexMask;
-
   class SubStringRange {
    public:
     explicit inline SubStringRange(String* string, int first = 0,
@@ -8845,6 +8972,10 @@
   // Conversion.
   inline bool AsArrayIndex(uint32_t* index);
 
+  // Trimming.
+  enum TrimMode { kTrim, kTrimLeft, kTrimRight };
+  static Handle<String> Trim(Handle<String> string, TrimMode mode);
+
   DECLARE_CAST(String)
 
   void PrintOn(FILE* out);
@@ -8853,7 +8984,7 @@
   bool LooksValid();
 
   // Dispatched behavior.
-  void StringShortPrint(StringStream* accumulator);
+  void StringShortPrint(StringStream* accumulator, bool show_details = true);
   void PrintUC16(std::ostream& os, int start = 0, int end = -1);  // NOLINT
 #if defined(DEBUG) || defined(OBJECT_PRINT)
   char* ToAsciiArray();
@@ -8867,11 +8998,6 @@
   static const int kLengthOffset = Name::kSize;
   static const int kSize = kLengthOffset + kPointerSize;
 
-  // Maximum number of characters to consider when trying to convert a string
-  // value into an array index.
-  static const int kMaxArrayIndexSize = 10;
-  STATIC_ASSERT(kMaxArrayIndexSize < (1 << kArrayIndexLengthBits));
-
   // Max char codes.
   static const int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar;
   static const uint32_t kMaxOneByteCharCodeU = unibrow::Latin1::kMaxChar;
@@ -9686,10 +9812,9 @@
   typedef FixedBodyDescriptor<JSReceiver::kPropertiesOffset, kSize, kSize>
       BodyDescriptor;
 
-  static Handle<Object> GetIdentityHash(Isolate* isolate,
-                                        Handle<JSProxy> receiver);
+  static Object* GetIdentityHash(Handle<JSProxy> receiver);
 
-  static Handle<Smi> GetOrCreateIdentityHash(Handle<JSProxy> proxy);
+  static Smi* GetOrCreateIdentityHash(Isolate* isolate, Handle<JSProxy> proxy);
 
   static Maybe<bool> SetPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
                                         Handle<Symbol> private_name,
@@ -10344,9 +10469,9 @@
 
 class AccessCheckInfo: public Struct {
  public:
-  DECL_ACCESSORS(named_callback, Object)
-  DECL_ACCESSORS(indexed_callback, Object)
   DECL_ACCESSORS(callback, Object)
+  DECL_ACCESSORS(named_interceptor, Object)
+  DECL_ACCESSORS(indexed_interceptor, Object)
   DECL_ACCESSORS(data, Object)
 
   DECLARE_CAST(AccessCheckInfo)
@@ -10355,10 +10480,13 @@
   DECLARE_PRINTER(AccessCheckInfo)
   DECLARE_VERIFIER(AccessCheckInfo)
 
-  static const int kNamedCallbackOffset   = HeapObject::kHeaderSize;
-  static const int kIndexedCallbackOffset = kNamedCallbackOffset + kPointerSize;
-  static const int kCallbackOffset = kIndexedCallbackOffset + kPointerSize;
-  static const int kDataOffset = kCallbackOffset + kPointerSize;
+  static AccessCheckInfo* Get(Isolate* isolate, Handle<JSObject> receiver);
+
+  static const int kCallbackOffset = HeapObject::kHeaderSize;
+  static const int kNamedInterceptorOffset = kCallbackOffset + kPointerSize;
+  static const int kIndexedInterceptorOffset =
+      kNamedInterceptorOffset + kPointerSize;
+  static const int kDataOffset = kIndexedInterceptorOffset + kPointerSize;
   static const int kSize = kDataOffset + kPointerSize;
 
  private:
@@ -10464,6 +10592,7 @@
   DECL_ACCESSORS(signature, Object)
   DECL_ACCESSORS(instance_call_handler, Object)
   DECL_ACCESSORS(access_check_info, Object)
+  DECL_ACCESSORS(shared_function_info, Object)
   DECL_INT_ACCESSORS(flag)
 
   inline int length() const;
@@ -10478,7 +10607,6 @@
   DECL_BOOLEAN_ACCESSORS(read_only_prototype)
   DECL_BOOLEAN_ACCESSORS(remove_prototype)
   DECL_BOOLEAN_ACCESSORS(do_not_cache)
-  DECL_BOOLEAN_ACCESSORS(instantiated)
   DECL_BOOLEAN_ACCESSORS(accept_any_receiver)
 
   DECLARE_CAST(FunctionTemplateInfo)
@@ -10503,17 +10631,18 @@
   static const int kInstanceCallHandlerOffset = kSignatureOffset + kPointerSize;
   static const int kAccessCheckInfoOffset =
       kInstanceCallHandlerOffset + kPointerSize;
-  static const int kFlagOffset = kAccessCheckInfoOffset + kPointerSize;
+  static const int kSharedFunctionInfoOffset =
+      kAccessCheckInfoOffset + kPointerSize;
+  static const int kFlagOffset = kSharedFunctionInfoOffset + kPointerSize;
   static const int kLengthOffset = kFlagOffset + kPointerSize;
   static const int kSize = kLengthOffset + kPointerSize;
 
+  static Handle<SharedFunctionInfo> GetOrCreateSharedFunctionInfo(
+      Isolate* isolate, Handle<FunctionTemplateInfo> info);
   // Returns true if |object| is an instance of this function template.
-  bool IsTemplateFor(Object* object);
+  inline bool IsTemplateFor(JSObject* object);
   bool IsTemplateFor(Map* map);
-
-  // Returns the holder JSObject if the function can legally be called with this
-  // receiver.  Returns Heap::null_value() if the call is illegal.
-  Object* GetCompatibleReceiver(Isolate* isolate, Object* receiver);
+  inline bool instantiated();
 
  private:
   // Bit position in the flag, from least significant bit position.
@@ -10523,8 +10652,7 @@
   static const int kReadOnlyPrototypeBit = 3;
   static const int kRemovePrototypeBit   = 4;
   static const int kDoNotCacheBit        = 5;
-  static const int kInstantiatedBit      = 6;
-  static const int kAcceptAnyReceiver = 7;
+  static const int kAcceptAnyReceiver = 6;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(FunctionTemplateInfo);
 };
diff --git a/src/optimizing-compile-dispatcher.cc b/src/optimizing-compile-dispatcher.cc
index 7077339..c3ec835 100644
--- a/src/optimizing-compile-dispatcher.cc
+++ b/src/optimizing-compile-dispatcher.cc
@@ -19,6 +19,10 @@
   if (restore_function_code) {
     Handle<JSFunction> function = job->info()->closure();
     function->ReplaceCode(function->shared()->code());
+    // TODO(mvstanton): We can't call EnsureLiterals here due to allocation,
+    // but we probably shouldn't call ReplaceCode either, as this
+    // sometimes runs on the worker thread!
+    // JSFunction::EnsureLiterals(function);
   }
   delete job;
 }
diff --git a/src/ostreams.cc b/src/ostreams.cc
index 120db25..45f41bb 100644
--- a/src/ostreams.cc
+++ b/src/ostreams.cc
@@ -97,5 +97,11 @@
   return PrintUC32(os, c.value, IsPrint);
 }
 
+std::ostream& operator<<(std::ostream& os, const AsHex& hex) {
+  char buf[20];
+  snprintf(buf, sizeof(buf), "%.*" PRIx64, hex.min_width, hex.value);
+  return os << buf;
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/ostreams.h b/src/ostreams.h
index 1c2f38a..977b5c6 100644
--- a/src/ostreams.h
+++ b/src/ostreams.h
@@ -66,6 +66,12 @@
   uint16_t value;
 };
 
+struct AsHex {
+  explicit AsHex(uint64_t v, uint8_t min_width = 0)
+      : value(v), min_width(min_width) {}
+  uint64_t value;
+  uint8_t min_width;
+};
 
 // Writes the given character to the output escaping everything outside of
 // printable/space ASCII range. Additionally escapes '\' making escaping
@@ -83,6 +89,9 @@
 // of printable ASCII range.
 std::ostream& operator<<(std::ostream& os, const AsUC32& c);
 
+// Writes the given number to the output in hexadecimal notation.
+std::ostream& operator<<(std::ostream& os, const AsHex& v);
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/parsing/OWNERS b/src/parsing/OWNERS
index a5daeb3..44cc4ed 100644
--- a/src/parsing/OWNERS
+++ b/src/parsing/OWNERS
@@ -4,4 +4,4 @@
 littledan@chromium.org
 marja@chromium.org
 rossberg@chromium.org
-
+vogelheim@chromium.org
diff --git a/src/parsing/expression-classifier.h b/src/parsing/expression-classifier.h
index 3f70ed8..8e13d0e 100644
--- a/src/parsing/expression-classifier.h
+++ b/src/parsing/expression-classifier.h
@@ -13,35 +13,55 @@
 namespace internal {
 
 
+#define ERROR_CODES(T)                          \
+  T(ExpressionProduction, 0)                    \
+  T(FormalParameterInitializerProduction, 1)    \
+  T(BindingPatternProduction, 2)                \
+  T(AssignmentPatternProduction, 3)             \
+  T(DistinctFormalParametersProduction, 4)      \
+  T(StrictModeFormalParametersProduction, 5)    \
+  T(ArrowFormalParametersProduction, 6)         \
+  T(LetPatternProduction, 7)                    \
+  T(CoverInitializedNameProduction, 8)          \
+  T(TailCallExpressionProduction, 9)            \
+  T(AsyncArrowFormalParametersProduction, 10)   \
+  T(AsyncBindingPatternProduction, 11)
+
+
 template <typename Traits>
 class ExpressionClassifier {
  public:
+  enum ErrorKind : unsigned {
+#define DEFINE_ERROR_KIND(NAME, CODE) k##NAME = CODE,
+    ERROR_CODES(DEFINE_ERROR_KIND)
+#undef DEFINE_ERROR_KIND
+    kUnusedError = 15  // Larger than error codes; should fit in 4 bits
+  };
+
   struct Error {
-    Error()
+    V8_INLINE Error()
         : location(Scanner::Location::invalid()),
           message(MessageTemplate::kNone),
+          kind(kUnusedError),
           type(kSyntaxError),
           arg(nullptr) {}
+    V8_INLINE explicit Error(Scanner::Location loc,
+                             MessageTemplate::Template msg, ErrorKind k,
+                             const char* a = nullptr,
+                             ParseErrorType t = kSyntaxError)
+        : location(loc), message(msg), kind(k), type(t), arg(a) {}
 
     Scanner::Location location;
-    MessageTemplate::Template message : 30;
+    MessageTemplate::Template message : 26;
+    unsigned kind : 4;
     ParseErrorType type : 2;
     const char* arg;
   };
 
-  enum TargetProduction {
-    ExpressionProduction = 1 << 0,
-    FormalParameterInitializerProduction = 1 << 1,
-    BindingPatternProduction = 1 << 2,
-    AssignmentPatternProduction = 1 << 3,
-    DistinctFormalParametersProduction = 1 << 4,
-    StrictModeFormalParametersProduction = 1 << 5,
-    ArrowFormalParametersProduction = 1 << 6,
-    LetPatternProduction = 1 << 7,
-    CoverInitializedNameProduction = 1 << 8,
-    TailCallExpressionProduction = 1 << 9,
-    AsyncArrowFormalParametersProduction = 1 << 10,
-    AsyncBindingPatternProduction = 1 << 11,
+  enum TargetProduction : unsigned {
+#define DEFINE_PRODUCTION(NAME, CODE) NAME = 1 << CODE,
+    ERROR_CODES(DEFINE_PRODUCTION)
+#undef DEFINE_PRODUCTION
 
     ExpressionProductions =
         (ExpressionProduction | FormalParameterInitializerProduction |
@@ -58,63 +78,75 @@
          AsyncArrowFormalParametersProduction | AsyncBindingPatternProduction)
   };
 
-  enum FunctionProperties { NonSimpleParameter = 1 << 0 };
+  enum FunctionProperties : unsigned {
+    NonSimpleParameter = 1 << 0
+  };
 
   explicit ExpressionClassifier(const Traits* t)
       : zone_(t->zone()),
         non_patterns_to_rewrite_(t->GetNonPatternList()),
+        reported_errors_(t->GetReportedErrorList()),
+        duplicate_finder_(nullptr),
         invalid_productions_(0),
-        function_properties_(0),
-        duplicate_finder_(nullptr) {
+        function_properties_(0) {
+    reported_errors_begin_ = reported_errors_end_ = reported_errors_->length();
     non_pattern_begin_ = non_patterns_to_rewrite_->length();
   }
 
   ExpressionClassifier(const Traits* t, DuplicateFinder* duplicate_finder)
       : zone_(t->zone()),
         non_patterns_to_rewrite_(t->GetNonPatternList()),
+        reported_errors_(t->GetReportedErrorList()),
+        duplicate_finder_(duplicate_finder),
         invalid_productions_(0),
-        function_properties_(0),
-        duplicate_finder_(duplicate_finder) {
+        function_properties_(0) {
+    reported_errors_begin_ = reported_errors_end_ = reported_errors_->length();
     non_pattern_begin_ = non_patterns_to_rewrite_->length();
   }
 
   ~ExpressionClassifier() { Discard(); }
 
-  bool is_valid(unsigned productions) const {
+  V8_INLINE bool is_valid(unsigned productions) const {
     return (invalid_productions_ & productions) == 0;
   }
 
-  DuplicateFinder* duplicate_finder() const { return duplicate_finder_; }
+  V8_INLINE DuplicateFinder* duplicate_finder() const {
+    return duplicate_finder_;
+  }
 
-  bool is_valid_expression() const { return is_valid(ExpressionProduction); }
+  V8_INLINE bool is_valid_expression() const {
+    return is_valid(ExpressionProduction);
+  }
 
-  bool is_valid_formal_parameter_initializer() const {
+  V8_INLINE bool is_valid_formal_parameter_initializer() const {
     return is_valid(FormalParameterInitializerProduction);
   }
 
-  bool is_valid_binding_pattern() const {
+  V8_INLINE bool is_valid_binding_pattern() const {
     return is_valid(BindingPatternProduction);
   }
 
-  bool is_valid_assignment_pattern() const {
+  V8_INLINE bool is_valid_assignment_pattern() const {
     return is_valid(AssignmentPatternProduction);
   }
 
-  bool is_valid_arrow_formal_parameters() const {
+  V8_INLINE bool is_valid_arrow_formal_parameters() const {
     return is_valid(ArrowFormalParametersProduction);
   }
 
-  bool is_valid_formal_parameter_list_without_duplicates() const {
+  V8_INLINE bool is_valid_formal_parameter_list_without_duplicates() const {
     return is_valid(DistinctFormalParametersProduction);
   }
 
   // Note: callers should also check
   // is_valid_formal_parameter_list_without_duplicates().
-  bool is_valid_strict_mode_formal_parameters() const {
+  V8_INLINE bool is_valid_strict_mode_formal_parameters() const {
     return is_valid(StrictModeFormalParametersProduction);
   }
 
-  bool is_valid_let_pattern() const { return is_valid(LetPatternProduction); }
+  V8_INLINE bool is_valid_let_pattern() const {
+    return is_valid(LetPatternProduction);
+  }
 
   bool is_valid_async_arrow_formal_parameters() const {
     return is_valid(AsyncArrowFormalParametersProduction);
@@ -124,58 +156,65 @@
     return is_valid(AsyncBindingPatternProduction);
   }
 
-  const Error& expression_error() const { return expression_error_; }
-
-  const Error& formal_parameter_initializer_error() const {
-    return formal_parameter_initializer_error_;
+  V8_INLINE const Error& expression_error() const {
+    return reported_error(kExpressionProduction);
   }
 
-  const Error& binding_pattern_error() const { return binding_pattern_error_; }
-
-  const Error& assignment_pattern_error() const {
-    return assignment_pattern_error_;
+  V8_INLINE const Error& formal_parameter_initializer_error() const {
+    return reported_error(kFormalParameterInitializerProduction);
   }
 
-  const Error& arrow_formal_parameters_error() const {
-    return arrow_formal_parameters_error_;
+  V8_INLINE const Error& binding_pattern_error() const {
+    return reported_error(kBindingPatternProduction);
   }
 
-  const Error& duplicate_formal_parameter_error() const {
-    return duplicate_formal_parameter_error_;
+  V8_INLINE const Error& assignment_pattern_error() const {
+    return reported_error(kAssignmentPatternProduction);
   }
 
-  const Error& strict_mode_formal_parameter_error() const {
-    return strict_mode_formal_parameter_error_;
+  V8_INLINE const Error& arrow_formal_parameters_error() const {
+    return reported_error(kArrowFormalParametersProduction);
   }
 
-  const Error& let_pattern_error() const { return let_pattern_error_; }
+  V8_INLINE const Error& duplicate_formal_parameter_error() const {
+    return reported_error(kDistinctFormalParametersProduction);
+  }
 
-  bool has_cover_initialized_name() const {
+  V8_INLINE const Error& strict_mode_formal_parameter_error() const {
+    return reported_error(kStrictModeFormalParametersProduction);
+  }
+
+  V8_INLINE const Error& let_pattern_error() const {
+    return reported_error(kLetPatternProduction);
+  }
+
+  V8_INLINE bool has_cover_initialized_name() const {
     return !is_valid(CoverInitializedNameProduction);
   }
-  const Error& cover_initialized_name_error() const {
-    return cover_initialized_name_error_;
+
+  V8_INLINE const Error& cover_initialized_name_error() const {
+    return reported_error(kCoverInitializedNameProduction);
   }
 
-  bool has_tail_call_expression() const {
+  V8_INLINE bool has_tail_call_expression() const {
     return !is_valid(TailCallExpressionProduction);
   }
-  const Error& tail_call_expression_error() const {
-    return tail_call_expression_error_;
+  V8_INLINE const Error& tail_call_expression_error() const {
+    return reported_error(kTailCallExpressionProduction);
   }
-  const Error& async_arrow_formal_parameters_error() const {
-    return async_arrow_formal_parameters_error_;
+  V8_INLINE const Error& async_arrow_formal_parameters_error() const {
+    return reported_error(kAsyncArrowFormalParametersProduction);
   }
 
-  const Error& async_binding_pattern_error() const {
-    return async_binding_pattern_error_;
+  V8_INLINE const Error& async_binding_pattern_error() const {
+    return reported_error(kAsyncBindingPatternProduction);
   }
 
-  bool is_simple_parameter_list() const {
+  V8_INLINE bool is_simple_parameter_list() const {
     return !(function_properties_ & NonSimpleParameter);
   }
 
-  void RecordNonSimpleParameter() {
+  V8_INLINE void RecordNonSimpleParameter() {
     function_properties_ |= NonSimpleParameter;
   }
 
@@ -184,9 +223,7 @@
                              const char* arg = nullptr) {
     if (!is_valid_expression()) return;
     invalid_productions_ |= ExpressionProduction;
-    expression_error_.location = loc;
-    expression_error_.message = message;
-    expression_error_.arg = arg;
+    Add(Error(loc, message, kExpressionProduction, arg));
   }
 
   void RecordExpressionError(const Scanner::Location& loc,
@@ -194,10 +231,7 @@
                              ParseErrorType type, const char* arg = nullptr) {
     if (!is_valid_expression()) return;
     invalid_productions_ |= ExpressionProduction;
-    expression_error_.location = loc;
-    expression_error_.message = message;
-    expression_error_.arg = arg;
-    expression_error_.type = type;
+    Add(Error(loc, message, kExpressionProduction, arg, type));
   }
 
   void RecordFormalParameterInitializerError(const Scanner::Location& loc,
@@ -205,9 +239,7 @@
                                              const char* arg = nullptr) {
     if (!is_valid_formal_parameter_initializer()) return;
     invalid_productions_ |= FormalParameterInitializerProduction;
-    formal_parameter_initializer_error_.location = loc;
-    formal_parameter_initializer_error_.message = message;
-    formal_parameter_initializer_error_.arg = arg;
+    Add(Error(loc, message, kFormalParameterInitializerProduction, arg));
   }
 
   void RecordBindingPatternError(const Scanner::Location& loc,
@@ -215,9 +247,7 @@
                                  const char* arg = nullptr) {
     if (!is_valid_binding_pattern()) return;
     invalid_productions_ |= BindingPatternProduction;
-    binding_pattern_error_.location = loc;
-    binding_pattern_error_.message = message;
-    binding_pattern_error_.arg = arg;
+    Add(Error(loc, message, kBindingPatternProduction, arg));
   }
 
   void RecordAssignmentPatternError(const Scanner::Location& loc,
@@ -225,9 +255,7 @@
                                     const char* arg = nullptr) {
     if (!is_valid_assignment_pattern()) return;
     invalid_productions_ |= AssignmentPatternProduction;
-    assignment_pattern_error_.location = loc;
-    assignment_pattern_error_.message = message;
-    assignment_pattern_error_.arg = arg;
+    Add(Error(loc, message, kAssignmentPatternProduction, arg));
   }
 
   void RecordPatternError(const Scanner::Location& loc,
@@ -242,9 +270,7 @@
                                         const char* arg = nullptr) {
     if (!is_valid_arrow_formal_parameters()) return;
     invalid_productions_ |= ArrowFormalParametersProduction;
-    arrow_formal_parameters_error_.location = loc;
-    arrow_formal_parameters_error_.message = message;
-    arrow_formal_parameters_error_.arg = arg;
+    Add(Error(loc, message, kArrowFormalParametersProduction, arg));
   }
 
   void RecordAsyncArrowFormalParametersError(const Scanner::Location& loc,
@@ -252,9 +278,7 @@
                                              const char* arg = nullptr) {
     if (!is_valid_async_arrow_formal_parameters()) return;
     invalid_productions_ |= AsyncArrowFormalParametersProduction;
-    async_arrow_formal_parameters_error_.location = loc;
-    async_arrow_formal_parameters_error_.message = message;
-    async_arrow_formal_parameters_error_.arg = arg;
+    Add(Error(loc, message, kAsyncArrowFormalParametersProduction, arg));
   }
 
   void RecordAsyncBindingPatternError(const Scanner::Location& loc,
@@ -262,17 +286,14 @@
                                       const char* arg = nullptr) {
     if (!is_valid_async_binding_pattern()) return;
     invalid_productions_ |= AsyncBindingPatternProduction;
-    async_binding_pattern_error_.location = loc;
-    async_binding_pattern_error_.message = message;
-    async_binding_pattern_error_.arg = arg;
+    Add(Error(loc, message, kAsyncBindingPatternProduction, arg));
   }
 
   void RecordDuplicateFormalParameterError(const Scanner::Location& loc) {
     if (!is_valid_formal_parameter_list_without_duplicates()) return;
     invalid_productions_ |= DistinctFormalParametersProduction;
-    duplicate_formal_parameter_error_.location = loc;
-    duplicate_formal_parameter_error_.message = MessageTemplate::kParamDupe;
-    duplicate_formal_parameter_error_.arg = nullptr;
+    Add(Error(loc, MessageTemplate::kParamDupe,
+              kDistinctFormalParametersProduction));
   }
 
   // Record a binding that would be invalid in strict mode.  Confusingly this
@@ -283,9 +304,7 @@
                                             const char* arg = nullptr) {
     if (!is_valid_strict_mode_formal_parameters()) return;
     invalid_productions_ |= StrictModeFormalParametersProduction;
-    strict_mode_formal_parameter_error_.location = loc;
-    strict_mode_formal_parameter_error_.message = message;
-    strict_mode_formal_parameter_error_.arg = arg;
+    Add(Error(loc, message, kStrictModeFormalParametersProduction, arg));
   }
 
   void RecordLetPatternError(const Scanner::Location& loc,
@@ -293,9 +312,7 @@
                              const char* arg = nullptr) {
     if (!is_valid_let_pattern()) return;
     invalid_productions_ |= LetPatternProduction;
-    let_pattern_error_.location = loc;
-    let_pattern_error_.message = message;
-    let_pattern_error_.arg = arg;
+    Add(Error(loc, message, kLetPatternProduction, arg));
   }
 
   void RecordCoverInitializedNameError(const Scanner::Location& loc,
@@ -303,9 +320,7 @@
                                        const char* arg = nullptr) {
     if (has_cover_initialized_name()) return;
     invalid_productions_ |= CoverInitializedNameProduction;
-    cover_initialized_name_error_.location = loc;
-    cover_initialized_name_error_.message = message;
-    cover_initialized_name_error_.arg = arg;
+    Add(Error(loc, message, kCoverInitializedNameProduction, arg));
   }
 
   void RecordTailCallExpressionError(const Scanner::Location& loc,
@@ -313,83 +328,102 @@
                                      const char* arg = nullptr) {
     if (has_tail_call_expression()) return;
     invalid_productions_ |= TailCallExpressionProduction;
-    tail_call_expression_error_.location = loc;
-    tail_call_expression_error_.message = message;
-    tail_call_expression_error_.arg = arg;
+    Add(Error(loc, message, kTailCallExpressionProduction, arg));
   }
 
   void ForgiveCoverInitializedNameError() {
+    if (!(invalid_productions_ & CoverInitializedNameProduction)) return;
+    Error& e = reported_error(kCoverInitializedNameProduction);
+    e.kind = kUnusedError;
     invalid_productions_ &= ~CoverInitializedNameProduction;
-    cover_initialized_name_error_ = Error();
   }
 
   void ForgiveAssignmentPatternError() {
+    if (!(invalid_productions_ & AssignmentPatternProduction)) return;
+    Error& e = reported_error(kAssignmentPatternProduction);
+    e.kind = kUnusedError;
     invalid_productions_ &= ~AssignmentPatternProduction;
-    assignment_pattern_error_ = Error();
   }
 
   void Accumulate(ExpressionClassifier* inner,
                   unsigned productions = StandardProductions,
                   bool merge_non_patterns = true) {
+    DCHECK_EQ(inner->reported_errors_, reported_errors_);
+    DCHECK_EQ(inner->reported_errors_begin_, reported_errors_end_);
+    DCHECK_EQ(inner->reported_errors_end_, reported_errors_->length());
     if (merge_non_patterns) MergeNonPatterns(inner);
     // Propagate errors from inner, but don't overwrite already recorded
     // errors.
     unsigned non_arrow_inner_invalid_productions =
         inner->invalid_productions_ & ~ArrowFormalParametersProduction;
-    if (non_arrow_inner_invalid_productions == 0) return;
-    unsigned non_arrow_productions =
-        productions & ~ArrowFormalParametersProduction;
-    unsigned errors =
-        non_arrow_productions & non_arrow_inner_invalid_productions;
-    errors &= ~invalid_productions_;
-    if (errors != 0) {
-      invalid_productions_ |= errors;
-      if (errors & ExpressionProduction)
-        expression_error_ = inner->expression_error_;
-      if (errors & FormalParameterInitializerProduction)
-        formal_parameter_initializer_error_ =
-            inner->formal_parameter_initializer_error_;
-      if (errors & BindingPatternProduction)
-        binding_pattern_error_ = inner->binding_pattern_error_;
-      if (errors & AssignmentPatternProduction)
-        assignment_pattern_error_ = inner->assignment_pattern_error_;
-      if (errors & DistinctFormalParametersProduction)
-        duplicate_formal_parameter_error_ =
-            inner->duplicate_formal_parameter_error_;
-      if (errors & StrictModeFormalParametersProduction)
-        strict_mode_formal_parameter_error_ =
-            inner->strict_mode_formal_parameter_error_;
-      if (errors & LetPatternProduction)
-        let_pattern_error_ = inner->let_pattern_error_;
-      if (errors & CoverInitializedNameProduction)
-        cover_initialized_name_error_ = inner->cover_initialized_name_error_;
-      if (errors & TailCallExpressionProduction)
-        tail_call_expression_error_ = inner->tail_call_expression_error_;
-      if (errors & AsyncArrowFormalParametersProduction)
-        async_arrow_formal_parameters_error_ =
-            inner->async_arrow_formal_parameters_error_;
-      if (errors & AsyncBindingPatternProduction)
-        async_binding_pattern_error_ = inner->async_binding_pattern_error_;
-    }
-
-    // As an exception to the above, the result continues to be a valid arrow
-    // formal parameters if the inner expression is a valid binding pattern.
-    if (productions & ArrowFormalParametersProduction &&
-        is_valid_arrow_formal_parameters()) {
-      // Also copy function properties if expecting an arrow function
-      // parameter.
-      function_properties_ |= inner->function_properties_;
-
-      if (!inner->is_valid_binding_pattern()) {
-        invalid_productions_ |= ArrowFormalParametersProduction;
-        arrow_formal_parameters_error_ = inner->binding_pattern_error_;
+    if (non_arrow_inner_invalid_productions) {
+      unsigned errors = non_arrow_inner_invalid_productions & productions &
+                        ~invalid_productions_;
+      // The result will continue to be a valid arrow formal parameters if the
+      // inner expression is a valid binding pattern.
+      bool copy_BP_to_AFP = false;
+      if (productions & ArrowFormalParametersProduction &&
+          is_valid_arrow_formal_parameters()) {
+        // Also copy function properties if expecting an arrow function
+        // parameter.
+        function_properties_ |= inner->function_properties_;
+        if (!inner->is_valid_binding_pattern()) {
+          copy_BP_to_AFP = true;
+          invalid_productions_ |= ArrowFormalParametersProduction;
+        }
+      }
+      // Traverse the list of errors reported by the inner classifier
+      // to copy what's necessary.
+      if (errors != 0 || copy_BP_to_AFP) {
+        invalid_productions_ |= errors;
+        int binding_pattern_index = inner->reported_errors_end_;
+        for (int i = inner->reported_errors_begin_;
+             i < inner->reported_errors_end_; i++) {
+          int k = reported_errors_->at(i).kind;
+          if (errors & (1 << k)) Copy(i);
+          // Check if it's a BP error that has to be copied to an AFP error.
+          if (k == kBindingPatternProduction && copy_BP_to_AFP) {
+            if (reported_errors_end_ <= i) {
+              // If the BP error itself has not already been copied,
+              // copy it now and change it to an AFP error.
+              Copy(i);
+              reported_errors_->at(reported_errors_end_-1).kind =
+                  kArrowFormalParametersProduction;
+            } else {
+              // Otherwise, if the BP error was already copied, keep its
+              // position and wait until the end of the traversal.
+              DCHECK_EQ(reported_errors_end_, i+1);
+              binding_pattern_index = i;
+            }
+          }
+        }
+        // Do we still have to copy the BP error to an AFP error?
+        if (binding_pattern_index < inner->reported_errors_end_) {
+          // If there's still unused space in the list of the inner
+          // classifier, copy it there, otherwise add it to the end
+          // of the list.
+          if (reported_errors_end_ < inner->reported_errors_end_)
+            Copy(binding_pattern_index);
+          else
+            Add(reported_errors_->at(binding_pattern_index));
+          reported_errors_->at(reported_errors_end_-1).kind =
+              kArrowFormalParametersProduction;
+        }
       }
     }
+    reported_errors_->Rewind(reported_errors_end_);
+    inner->reported_errors_begin_ = inner->reported_errors_end_ =
+        reported_errors_end_;
   }
 
   V8_INLINE int GetNonPatternBegin() const { return non_pattern_begin_; }
 
   V8_INLINE void Discard() {
+    if (reported_errors_end_ == reported_errors_->length()) {
+      reported_errors_->Rewind(reported_errors_begin_);
+      reported_errors_end_ = reported_errors_begin_;
+    }
+    DCHECK_EQ(reported_errors_begin_, reported_errors_end_);
     DCHECK_LE(non_pattern_begin_, non_patterns_to_rewrite_->length());
     non_patterns_to_rewrite_->Rewind(non_pattern_begin_);
   }
@@ -400,29 +434,69 @@
   }
 
  private:
+  V8_INLINE Error& reported_error(ErrorKind kind) const {
+    if (invalid_productions_ & (1 << kind)) {
+      for (int i = reported_errors_begin_; i < reported_errors_end_; i++) {
+        if (reported_errors_->at(i).kind == kind)
+          return reported_errors_->at(i);
+      }
+      UNREACHABLE();
+    }
+    // We should only be looking for an error when we know that one has
+    // been reported.  But we're not...  So this is to make sure we have
+    // the same behaviour.
+    static Error none;
+    return none;
+  }
+
+  // Adds e to the end of the list of reported errors for this classifier.
+  // It is expected that this classifier is the last one in the stack.
+  V8_INLINE void Add(const Error& e) {
+    DCHECK_EQ(reported_errors_end_, reported_errors_->length());
+    reported_errors_->Add(e, zone_);
+    reported_errors_end_++;
+  }
+
+  // Copies the error at position i of the list of reported errors, so that
+  // it becomes the last error reported for this classifier.  Position i
+  // could be either after the existing errors of this classifier (i.e.,
+  // in an inner classifier) or it could be an existing error (in case a
+  // copy is needed).
+  V8_INLINE void Copy(int i) {
+    DCHECK_LT(i, reported_errors_->length());
+    if (reported_errors_end_ != i)
+      reported_errors_->at(reported_errors_end_) = reported_errors_->at(i);
+    reported_errors_end_++;
+  }
+
   Zone* zone_;
   ZoneList<typename Traits::Type::Expression>* non_patterns_to_rewrite_;
-  int non_pattern_begin_;
-  unsigned invalid_productions_;
-  unsigned function_properties_;
-  // TODO(ishell): consider using Zone[Hash]Map<TargetProduction, Error>
-  // here to consume less stack space during parsing.
-  Error expression_error_;
-  Error formal_parameter_initializer_error_;
-  Error binding_pattern_error_;
-  Error assignment_pattern_error_;
-  Error arrow_formal_parameters_error_;
-  Error duplicate_formal_parameter_error_;
-  Error strict_mode_formal_parameter_error_;
-  Error let_pattern_error_;
-  Error cover_initialized_name_error_;
-  Error tail_call_expression_error_;
-  Error async_arrow_formal_parameters_error_;
-  Error async_binding_pattern_error_;
+  ZoneList<Error>* reported_errors_;
   DuplicateFinder* duplicate_finder_;
+  // The uint16_t for non_pattern_begin_ will not be enough in the case,
+  // e.g., of an array literal containing more than 64K inner array
+  // literals with spreads, as in:
+  // var N=65536; eval("var x=[];" + "[" + "[...x],".repeat(N) + "].length");
+  // An implementation limit error in ParserBase::AddNonPatternForRewriting
+  // will be triggered in this case.
+  uint16_t non_pattern_begin_;
+  unsigned invalid_productions_ : 14;
+  unsigned function_properties_ : 2;
+  // The uint16_t for reported_errors_begin_ and reported_errors_end_ will
+  // not be enough in the case of a long series of expressions using nested
+  // classifiers, e.g., a long sequence of assignments such as the
+  // following:
+  // var N=65536; eval("var x;" + "x=".repeat(N) + "42");
+  // This should not be a problem, as such things currently fail with a
+  // stack overflow while parsing.
+  uint16_t reported_errors_begin_;
+  uint16_t reported_errors_end_;
 };
 
 
+#undef ERROR_CODES
+
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/parsing/parameter-initializer-rewriter.cc b/src/parsing/parameter-initializer-rewriter.cc
index 6362c63..4bb367d 100644
--- a/src/parsing/parameter-initializer-rewriter.cc
+++ b/src/parsing/parameter-initializer-rewriter.cc
@@ -24,7 +24,9 @@
            Scope* new_scope)
       : AstExpressionVisitor(stack_limit, initializer),
         old_scope_(old_scope),
-        new_scope_(new_scope) {}
+        new_scope_(new_scope),
+        old_scope_closure_(old_scope->ClosureScope()),
+        new_scope_closure_(new_scope->ClosureScope()) {}
   ~Rewriter();
 
  private:
@@ -40,6 +42,8 @@
 
   Scope* old_scope_;
   Scope* new_scope_;
+  Scope* old_scope_closure_;
+  Scope* new_scope_closure_;
   std::vector<std::pair<Variable*, int>> temps_;
 };
 
@@ -55,8 +59,8 @@
     // Ensure that we add temporaries in the order they appeared in old_scope_.
     std::sort(temps_.begin(), temps_.end(), LessThanSecond());
     for (auto var_and_index : temps_) {
-      var_and_index.first->set_scope(new_scope_);
-      new_scope_->AddTemporary(var_and_index.first);
+      var_and_index.first->set_scope(new_scope_closure_);
+      new_scope_closure_->AddTemporary(var_and_index.first);
     }
   }
 }
@@ -90,11 +94,11 @@
   if (proxy->is_resolved()) {
     Variable* var = proxy->var();
     if (var->mode() != TEMPORARY) return;
-    // For rewriting inside the same ClosureScope (e.g., putting default
-    // parameter values in their own inner scope in certain cases), refrain
-    // from invalidly moving temporaries to a block scope.
-    if (var->scope()->ClosureScope() == new_scope_->ClosureScope()) return;
-    int index = old_scope_->RemoveTemporary(var);
+    // Temporaries are only placed in ClosureScopes.
+    DCHECK_EQ(var->scope(), var->scope()->ClosureScope());
+    // If the temporary is already where it should be, return quickly.
+    if (var->scope() == new_scope_closure_) return;
+    int index = old_scope_closure_->RemoveTemporary(var);
     if (index >= 0) {
       temps_.push_back(std::make_pair(var, index));
     }
diff --git a/src/parsing/parser-base.h b/src/parsing/parser-base.h
index 6086f7a..669defa 100644
--- a/src/parsing/parser-base.h
+++ b/src/parsing/parser-base.h
@@ -7,7 +7,7 @@
 
 #include "src/ast/scopes.h"
 #include "src/bailout-reason.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
 #include "src/messages.h"
 #include "src/parsing/expression-classifier.h"
 #include "src/parsing/func-name-inferrer.h"
@@ -196,9 +196,9 @@
         allow_harmony_restrictive_declarations_(false),
         allow_harmony_do_expressions_(false),
         allow_harmony_for_in_(false),
-        allow_harmony_function_name_(false),
         allow_harmony_function_sent_(false),
-        allow_harmony_async_await_(false) {}
+        allow_harmony_async_await_(false),
+        allow_harmony_restrictive_generators_(false) {}
 
 #define ALLOW_ACCESSORS(name)                           \
   bool allow_##name() const { return allow_##name##_; } \
@@ -216,9 +216,9 @@
   ALLOW_ACCESSORS(harmony_restrictive_declarations);
   ALLOW_ACCESSORS(harmony_do_expressions);
   ALLOW_ACCESSORS(harmony_for_in);
-  ALLOW_ACCESSORS(harmony_function_name);
   ALLOW_ACCESSORS(harmony_function_sent);
   ALLOW_ACCESSORS(harmony_async_await);
+  ALLOW_ACCESSORS(harmony_restrictive_generators);
   SCANNER_ACCESSORS(harmony_exponentiation_operator);
 
 #undef SCANNER_ACCESSORS
@@ -385,8 +385,8 @@
 
     typename Traits::Type::Factory* factory() { return factory_; }
 
-    const List<DestructuringAssignment>& destructuring_assignments_to_rewrite()
-        const {
+    const ZoneList<DestructuringAssignment>&
+        destructuring_assignments_to_rewrite() const {
       return destructuring_assignments_to_rewrite_;
     }
 
@@ -408,6 +408,10 @@
       }
     }
 
+    ZoneList<typename ExpressionClassifier::Error>* GetReportedErrorList() {
+      return &reported_errors_;
+    }
+
     ReturnExprContext return_expr_context() const {
       return return_expr_context_;
     }
@@ -429,13 +433,16 @@
 
    private:
     void AddDestructuringAssignment(DestructuringAssignment pair) {
-      destructuring_assignments_to_rewrite_.Add(pair);
+      destructuring_assignments_to_rewrite_.Add(pair, (*scope_stack_)->zone());
     }
 
     V8_INLINE Scope* scope() { return *scope_stack_; }
 
-    void AddNonPatternForRewriting(ExpressionT expr) {
+    void AddNonPatternForRewriting(ExpressionT expr, bool* ok) {
       non_patterns_to_rewrite_.Add(expr, (*scope_stack_)->zone());
+      if (non_patterns_to_rewrite_.length() >=
+          std::numeric_limits<uint16_t>::max())
+        *ok = false;
     }
 
     // Used to assign an index to each literal that needs materialization in
@@ -466,11 +473,13 @@
     Scope** scope_stack_;
     Scope* outer_scope_;
 
-    List<DestructuringAssignment> destructuring_assignments_to_rewrite_;
+    ZoneList<DestructuringAssignment> destructuring_assignments_to_rewrite_;
     TailCallExpressionList tail_call_expressions_;
     ReturnExprContext return_expr_context_;
     ZoneList<ExpressionT> non_patterns_to_rewrite_;
 
+    ZoneList<typename ExpressionClassifier::Error> reported_errors_;
+
     typename Traits::Type::Factory* factory_;
 
     // If true, the next (and immediately following) function literal is
@@ -658,13 +667,13 @@
     Expect(Token::SEMICOLON, ok);
   }
 
-  bool peek_any_identifier() {
-    Token::Value next = peek();
-    return next == Token::IDENTIFIER || next == Token::ENUM ||
-           next == Token::AWAIT || next == Token::ASYNC ||
-           next == Token::FUTURE_STRICT_RESERVED_WORD || next == Token::LET ||
-           next == Token::STATIC || next == Token::YIELD;
+  bool is_any_identifier(Token::Value token) {
+    return token == Token::IDENTIFIER || token == Token::ENUM ||
+           token == Token::AWAIT || token == Token::ASYNC ||
+           token == Token::FUTURE_STRICT_RESERVED_WORD || token == Token::LET ||
+           token == Token::STATIC || token == Token::YIELD;
   }
+  bool peek_any_identifier() { return is_any_identifier(peek()); }
 
   bool CheckContextualKeyword(Vector<const char> keyword) {
     if (PeekContextualKeyword(keyword)) {
@@ -877,6 +886,10 @@
     }
   }
 
+  bool IsValidArrowFormalParametersStart(Token::Value token) {
+    return is_any_identifier(token) || token == Token::LPAREN;
+  }
+
   void ValidateArrowFormalParameters(const ExpressionClassifier* classifier,
                                      ExpressionT expr,
                                      bool parenthesized_formals, bool is_async,
@@ -1173,9 +1186,9 @@
   bool allow_harmony_restrictive_declarations_;
   bool allow_harmony_do_expressions_;
   bool allow_harmony_for_in_;
-  bool allow_harmony_function_name_;
   bool allow_harmony_function_sent_;
   bool allow_harmony_async_await_;
+  bool allow_harmony_restrictive_generators_;
 };
 
 template <class Traits>
@@ -1193,9 +1206,11 @@
       outer_function_state_(*function_state_stack),
       scope_stack_(scope_stack),
       outer_scope_(*scope_stack),
+      destructuring_assignments_to_rewrite_(16, scope->zone()),
       tail_call_expressions_(scope->zone()),
       return_expr_context_(ReturnExprContext::kInsideValidBlock),
       non_patterns_to_rewrite_(0, scope->zone()),
+      reported_errors_(16, scope->zone()),
       factory_(factory),
       next_function_is_parenthesized_(false),
       this_function_is_parenthesized_(false) {
@@ -1552,12 +1567,11 @@
       // Parentheses are not valid on the LHS of a BindingPattern, so we use the
       // is_valid_binding_pattern() check to detect multiple levels of
       // parenthesization.
-      if (!classifier->is_valid_binding_pattern()) {
-        ArrowFormalParametersUnexpectedToken(classifier);
-      }
+      bool pattern_error = !classifier->is_valid_binding_pattern();
       classifier->RecordPatternError(scanner()->peek_location(),
                                      MessageTemplate::kUnexpectedToken,
                                      Token::String(Token::LPAREN));
+      if (pattern_error) ArrowFormalParametersUnexpectedToken(classifier);
       Consume(Token::LPAREN);
       if (Check(Token::RPAREN)) {
         // ()=>x.  The continuation that looks for the => is in
@@ -1575,8 +1589,11 @@
                                           MessageTemplate::kUnexpectedToken,
                                           Token::String(Token::ELLIPSIS));
         classifier->RecordNonSimpleParameter();
-        ExpressionT expr =
-            this->ParseAssignmentExpression(true, classifier, CHECK_OK);
+        ExpressionClassifier binding_classifier(this);
+        ExpressionT expr = this->ParseAssignmentExpression(
+            true, &binding_classifier, CHECK_OK);
+        classifier->Accumulate(&binding_classifier,
+                               ExpressionClassifier::AllProductions);
         if (!this->IsIdentifier(expr) && !IsValidPattern(expr)) {
           classifier->RecordArrowFormalParametersError(
               Scanner::Location(ellipsis_pos, scanner()->location().end_pos),
@@ -1663,11 +1680,14 @@
   //   AssignmentExpression
   //   Expression ',' AssignmentExpression
 
-  ExpressionClassifier binding_classifier(this);
-  ExpressionT result =
-      this->ParseAssignmentExpression(accept_IN, &binding_classifier, CHECK_OK);
-  classifier->Accumulate(&binding_classifier,
-                         ExpressionClassifier::AllProductions);
+  ExpressionT result = this->EmptyExpression();
+  {
+    ExpressionClassifier binding_classifier(this);
+    result = this->ParseAssignmentExpression(accept_IN, &binding_classifier,
+                                             CHECK_OK);
+    classifier->Accumulate(&binding_classifier,
+                           ExpressionClassifier::AllProductions);
+  }
   bool is_simple_parameter_list = this->IsIdentifier(result);
   bool seen_rest = false;
   while (peek() == Token::COMMA) {
@@ -1690,6 +1710,7 @@
       seen_rest = is_rest = true;
     }
     int pos = position(), expr_pos = peek_position();
+    ExpressionClassifier binding_classifier(this);
     ExpressionT right = this->ParseAssignmentExpression(
         accept_IN, &binding_classifier, CHECK_OK);
     classifier->Accumulate(&binding_classifier,
@@ -1777,7 +1798,15 @@
                                                   literal_index, pos);
   if (first_spread_index >= 0) {
     result = factory()->NewRewritableExpression(result);
-    Traits::QueueNonPatternForRewriting(result);
+    Traits::QueueNonPatternForRewriting(result, ok);
+    if (!*ok) {
+      // If the non-pattern rewriting mechanism is used in the future for
+      // rewriting other things than spreads, this error message will have
+      // to change.  Also, this error message will never appear while pre-
+      // parsing (this is OK, as it is an implementation limitation).
+      ReportMessage(MessageTemplate::kTooManySpreads);
+      return this->EmptyExpression();
+    }
   }
   return result;
 }
@@ -1917,10 +1946,16 @@
         classifier->RecordLetPatternError(
             scanner()->location(), MessageTemplate::kLetInLexicalBinding);
       }
-      if (is_await && is_async_function()) {
-        classifier->RecordPatternError(
-            Scanner::Location(next_beg_pos, next_end_pos),
-            MessageTemplate::kAwaitBindingIdentifier);
+      if (is_await) {
+        if (is_async_function()) {
+          classifier->RecordPatternError(
+              Scanner::Location(next_beg_pos, next_end_pos),
+              MessageTemplate::kAwaitBindingIdentifier);
+        } else {
+          classifier->RecordAsyncArrowFormalParametersError(
+              Scanner::Location(next_beg_pos, next_end_pos),
+              MessageTemplate::kAwaitBindingIdentifier);
+        }
       }
       ExpressionT lhs = this->ExpressionFromIdentifier(
           *name, next_beg_pos, next_end_pos, scope_, factory());
@@ -1941,9 +1976,7 @@
             Scanner::Location(next_beg_pos, scanner()->location().end_pos),
             MessageTemplate::kInvalidCoverInitializedName);
 
-        if (allow_harmony_function_name()) {
-          Traits::SetFunctionNameFromIdentifierRef(rhs, lhs);
-        }
+        Traits::SetFunctionNameFromIdentifierRef(rhs, lhs);
       } else {
         value = lhs;
       }
@@ -2098,9 +2131,7 @@
 
     if (fni_ != nullptr) fni_->Infer();
 
-    if (allow_harmony_function_name()) {
-      Traits::SetFunctionNameFromPropertyName(property, name);
-    }
+    Traits::SetFunctionNameFromPropertyName(property, name);
   }
   Expect(Token::RBRACE, CHECK_OK);
 
@@ -2135,7 +2166,10 @@
     ExpressionT argument = this->ParseAssignmentExpression(
         true, classifier, CHECK_OK_CUSTOM(NullExpressionList));
     CheckNoTailCallExpressions(classifier, CHECK_OK_CUSTOM(NullExpressionList));
-    Traits::RewriteNonPattern(classifier, CHECK_OK_CUSTOM(NullExpressionList));
+    if (!maybe_arrow) {
+      Traits::RewriteNonPattern(classifier,
+                                CHECK_OK_CUSTOM(NullExpressionList));
+    }
     if (is_spread) {
       if (!spread_arg.IsValid()) {
         spread_arg.beg_pos = start_pos;
@@ -2172,11 +2206,17 @@
   }
   *first_spread_arg_loc = spread_arg;
 
-  if ((!maybe_arrow || peek() != Token::ARROW) && spread_arg.IsValid()) {
-    // Unspread parameter sequences are translated into array literals in the
-    // parser. Ensure that the number of materialized literals matches between
-    // the parser and preparser
-    Traits::MaterializeUnspreadArgumentsLiterals(unspread_sequences_count);
+  if (!maybe_arrow || peek() != Token::ARROW) {
+    if (maybe_arrow) {
+      Traits::RewriteNonPattern(classifier,
+                                CHECK_OK_CUSTOM(NullExpressionList));
+    }
+    if (spread_arg.IsValid()) {
+      // Unspread parameter sequences are translated into array literals in the
+      // parser. Ensure that the number of materialized literals matches between
+      // the parser and preparser
+      Traits::MaterializeUnspreadArgumentsLiterals(unspread_sequences_count);
+    }
   }
 
   return result;
@@ -2206,7 +2246,8 @@
                                                 classifier->duplicate_finder());
 
   bool is_async = allow_harmony_async_await() && peek() == Token::ASYNC &&
-                  !scanner()->HasAnyLineTerminatorAfterNext();
+                  !scanner()->HasAnyLineTerminatorAfterNext() &&
+                  IsValidArrowFormalParametersStart(PeekAhead());
 
   bool parenthesized_formals = peek() == Token::LPAREN;
   if (!is_async && !parenthesized_formals) {
@@ -2224,9 +2265,7 @@
   }
 
   if (peek() == Token::ARROW) {
-    classifier->RecordPatternError(scanner()->peek_location(),
-                                   MessageTemplate::kUnexpectedToken,
-                                   Token::String(Token::ARROW));
+    Scanner::Location arrow_loc = scanner()->peek_location();
     ValidateArrowFormalParameters(&arrow_formals_classifier, expression,
                                   parenthesized_formals, is_async, CHECK_OK);
     // This reads strangely, but is correct: it checks whether any
@@ -2263,6 +2302,10 @@
     }
     expression = this->ParseArrowFunctionLiteral(
         accept_IN, parameters, is_async, arrow_formals_classifier, CHECK_OK);
+    arrow_formals_classifier.Discard();
+    classifier->RecordPatternError(arrow_loc,
+                                   MessageTemplate::kUnexpectedToken,
+                                   Token::String(Token::ARROW));
 
     if (fni_ != nullptr) fni_->Infer();
 
@@ -2352,7 +2395,7 @@
     }
   }
 
-  if (op == Token::ASSIGN && allow_harmony_function_name()) {
+  if (op == Token::ASSIGN) {
     Traits::SetFunctionNameFromIdentifierRef(right, expression);
   }
 
@@ -2457,6 +2500,12 @@
     *ok = false;
     return Traits::EmptyExpression();
   }
+  if (is_resumable()) {
+    Scanner::Location sub_loc(sub_expression_pos, loc.end_pos);
+    ReportMessageAt(sub_loc, MessageTemplate::kUnexpectedTailCall);
+    *ok = false;
+    return Traits::EmptyExpression();
+  }
   ReturnExprContext return_expr_context =
       function_state_->return_expr_context();
   if (return_expr_context != ReturnExprContext::kInsideValidReturnStatement) {
@@ -2502,8 +2551,8 @@
   if (peek() != Token::CONDITIONAL) return expression;
   CheckNoTailCallExpressions(classifier, CHECK_OK);
   Traits::RewriteNonPattern(classifier, CHECK_OK);
-  ArrowFormalParametersUnexpectedToken(classifier);
   BindingPatternUnexpectedToken(classifier);
+  ArrowFormalParametersUnexpectedToken(classifier);
   Consume(Token::CONDITIONAL);
   // In parsing the first assignment expression in conditional
   // expressions we always accept the 'in' keyword; see ECMA-262,
@@ -2667,6 +2716,8 @@
       default:
         break;
     }
+
+    int await_pos = peek_position();
     Consume(Token::AWAIT);
 
     ExpressionT value = ParseUnaryExpression(classifier, CHECK_OK);
@@ -2674,7 +2725,7 @@
     classifier->RecordFormalParameterInitializerError(
         Scanner::Location(beg_pos, scanner()->location().end_pos),
         MessageTemplate::kAwaitExpressionFormalParameter);
-    return Traits::RewriteAwaitExpression(value, beg_pos);
+    return Traits::RewriteAwaitExpression(value, await_pos);
   } else {
     return this->ParsePostfixExpression(classifier, ok);
   }
@@ -3148,9 +3199,7 @@
     init_classifier.Discard();
     classifier->RecordNonSimpleParameter();
 
-    if (allow_harmony_function_name()) {
-      Traits::SetFunctionNameFromIdentifierRef(initializer, pattern);
-    }
+    Traits::SetFunctionNameFromIdentifierRef(initializer, pattern);
   }
 
   Traits::AddFormalParameter(parameters, pattern, initializer,
@@ -3317,7 +3366,6 @@
     } else {
       // Single-expression body
       int pos = position();
-      ExpressionClassifier classifier(this);
       DCHECK(ReturnExprContext::kInsideValidBlock ==
              function_state_->return_expr_context());
       ReturnExprScope allow_tail_calls(
@@ -3325,6 +3373,7 @@
       body = this->NewStatementList(1, zone());
       this->AddParameterInitializationBlock(formal_parameters, body, is_async,
                                             CHECK_OK);
+      ExpressionClassifier classifier(this);
       if (is_async) {
         this->ParseAsyncArrowSingleExpressionBody(body, accept_IN, &classifier,
                                                   pos, CHECK_OK);
diff --git a/src/parsing/parser.cc b/src/parsing/parser.cc
index 822c49e..a39d0ee 100644
--- a/src/parsing/parser.cc
+++ b/src/parsing/parser.cc
@@ -205,7 +205,16 @@
     body = new (zone()) ZoneList<Statement*>(call_super ? 2 : 1, zone());
     if (call_super) {
       // $super_constructor = %_GetSuperConstructor(<this-function>)
-      // %reflect_construct($super_constructor, arguments, new.target)
+      // %reflect_construct(
+      //     $super_constructor, InternalArray(...args), new.target)
+      auto constructor_args_name = ast_value_factory()->empty_string();
+      bool is_duplicate;
+      bool is_rest = true;
+      bool is_optional = false;
+      Variable* constructor_args =
+          function_scope->DeclareParameter(constructor_args_name, TEMPORARY,
+                                           is_optional, is_rest, &is_duplicate);
+
       ZoneList<Expression*>* args =
           new (zone()) ZoneList<Expression*>(2, zone());
       VariableProxy* this_function_proxy = scope_->NewUnresolved(
@@ -217,10 +226,12 @@
       Expression* super_constructor = factory()->NewCallRuntime(
           Runtime::kInlineGetSuperConstructor, tmp, pos);
       args->Add(super_constructor, zone());
-      VariableProxy* arguments_proxy = scope_->NewUnresolved(
-          factory(), ast_value_factory()->arguments_string(), Variable::NORMAL,
-          pos);
-      args->Add(arguments_proxy, zone());
+      Spread* spread_args = factory()->NewSpread(
+          factory()->NewVariableProxy(constructor_args), pos, pos);
+      ZoneList<Expression*>* spread_args_expr =
+          new (zone()) ZoneList<Expression*>(1, zone());
+      spread_args_expr->Add(spread_args, zone());
+      args->AddAll(*PrepareSpreadArguments(spread_args_expr), zone());
       VariableProxy* new_target_proxy = scope_->NewUnresolved(
           factory(), ast_value_factory()->new_target_string(), Variable::NORMAL,
           pos);
@@ -669,13 +680,14 @@
 Expression* ParserTraits::FunctionSentExpression(Scope* scope,
                                                  AstNodeFactory* factory,
                                                  int pos) {
-  // We desugar function.sent into %GeneratorGetInput(generator).
+  // We desugar function.sent into %_GeneratorGetInputOrDebugPos(generator).
   Zone* zone = parser_->zone();
   ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(1, zone);
   VariableProxy* generator = factory->NewVariableProxy(
       parser_->function_state_->generator_object_variable());
   args->Add(generator, zone);
-  return factory->NewCallRuntime(Runtime::kGeneratorGetInput, args, pos);
+  return factory->NewCallRuntime(Runtime::kInlineGeneratorGetInputOrDebugPos,
+                                 args, pos);
 }
 
 
@@ -796,13 +808,13 @@
                       info->isolate()->is_tail_call_elimination_enabled());
   set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
   set_allow_harmony_for_in(FLAG_harmony_for_in);
-  set_allow_harmony_function_name(FLAG_harmony_function_name);
   set_allow_harmony_function_sent(FLAG_harmony_function_sent);
   set_allow_harmony_restrictive_declarations(
       FLAG_harmony_restrictive_declarations);
   set_allow_harmony_exponentiation_operator(
       FLAG_harmony_exponentiation_operator);
   set_allow_harmony_async_await(FLAG_harmony_async_await);
+  set_allow_harmony_restrictive_generators(FLAG_harmony_restrictive_generators);
   for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
        ++feature) {
     use_counts_[feature] = 0;
@@ -1971,10 +1983,24 @@
     } else if (IsLexicalVariableMode(mode) ||
                IsLexicalVariableMode(var->mode())) {
       // Allow duplicate function decls for web compat, see bug 4693.
+      bool duplicate_allowed = false;
       if (is_sloppy(language_mode()) && is_function_declaration &&
           var->is_function()) {
         DCHECK(IsLexicalVariableMode(mode) &&
                IsLexicalVariableMode(var->mode()));
+        // If the duplication is allowed, then the var will show up
+        // in the SloppyBlockFunctionMap and the new FunctionKind
+        // will be a permitted duplicate.
+        FunctionKind function_kind =
+            declaration->AsFunctionDeclaration()->fun()->kind();
+        duplicate_allowed =
+            scope->DeclarationScope()->sloppy_block_function_map()->Lookup(
+                const_cast<AstRawString*>(name), name->hash()) != nullptr &&
+            !IsAsyncFunction(function_kind) &&
+            !(allow_harmony_restrictive_generators() &&
+              IsGeneratorFunction(function_kind));
+      }
+      if (duplicate_allowed) {
         ++use_counts_[v8::Isolate::kSloppyModeBlockScopedFunctionRedefinition];
       } else {
         // The name was declared in this scope before; check for conflicting
@@ -2010,13 +2036,12 @@
     // In a var binding in a sloppy direct eval, pollute the enclosing scope
     // with this new binding by doing the following:
     // The proxy is bound to a lookup variable to force a dynamic declaration
-    // using the DeclareLookupSlot runtime function.
+    // using the DeclareEvalVar or DeclareEvalFunction runtime functions.
     Variable::Kind kind = Variable::NORMAL;
     // TODO(sigurds) figure out if kNotAssigned is OK here
     var = new (zone()) Variable(declaration_scope, name, mode, kind,
                                 declaration->initialization(), kNotAssigned);
     var->AllocateTo(VariableLocation::LOOKUP, -1);
-    var->SetFromEval();
     resolve = true;
   }
 
@@ -2036,7 +2061,7 @@
   // same variable if it is declared several times. This is not a
   // semantic issue as long as we keep the source order, but it may be
   // a performance issue since it may lead to repeated
-  // RuntimeHidden_DeclareLookupSlot calls.
+  // DeclareEvalVar or DeclareEvalFunction calls.
   declaration_scope->AddDeclaration(declaration);
 
   // If requested and we have a local variable, bind the proxy to the variable
@@ -2188,7 +2213,13 @@
   Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
   if (names) names->Add(name, zone());
   EmptyStatement* empty = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
-  if (is_sloppy(language_mode()) && !scope_->is_declaration_scope()) {
+  // Async functions don't undergo sloppy mode block scoped hoisting, and don't
+  // allow duplicates in a block. Both are represented by the
+  // sloppy_block_function_map. Don't add them to the map for async functions.
+  // Generators are also supposed to be prohibited; currently doing this behind
+  // a flag and UseCounting violations to assess web compatibility.
+  if (is_sloppy(language_mode()) && !scope_->is_declaration_scope() &&
+      !is_async && !(allow_harmony_restrictive_generators() && is_generator)) {
     SloppyBlockFunctionStatement* delegate =
         factory()->NewSloppyBlockFunctionStatement(empty, scope_);
     scope_->DeclarationScope()->sloppy_block_function_map()->Declare(name,
@@ -2412,9 +2443,7 @@
         }
       }
 
-      if (allow_harmony_function_name()) {
-        ParserTraits::SetFunctionNameFromIdentifierRef(value, pattern);
-      }
+      ParserTraits::SetFunctionNameFromIdentifierRef(value, pattern);
 
       // End position of the initializer is after the assignment expression.
       initializer_position = scanner()->location().end_pos;
@@ -2518,7 +2547,6 @@
       ReportUnexpectedToken(Next());
       *ok = false;
       return nullptr;
-
     default:
       break;
   }
@@ -2725,7 +2753,7 @@
       Expression* is_spec_object_call = factory()->NewCallRuntime(
           Runtime::kInlineIsJSReceiver, is_spec_object_args, pos);
 
-      // %_IsJSReceiver(temp) ? temp : throw_expression
+      // %_IsJSReceiver(temp) ? temp : 1;
       Expression* is_object_conditional = factory()->NewConditional(
           is_spec_object_call, factory()->NewVariableProxy(temp),
           factory()->NewSmiLiteral(1, pos), pos);
@@ -2744,7 +2772,7 @@
           function_state_, ReturnExprContext::kInsideValidReturnStatement);
       return_value = ParseExpression(true, CHECK_OK);
 
-      if (allow_tailcalls() && !is_sloppy(language_mode())) {
+      if (allow_tailcalls() && !is_sloppy(language_mode()) && !is_resumable()) {
         // ES6 14.6.1 Static Semantics: IsInTailPosition
         function_state_->AddImplicitTailCallExpression(return_value);
       }
@@ -2970,40 +2998,40 @@
     catch_scope = NewScope(scope_, CATCH_SCOPE);
     catch_scope->set_start_position(scanner()->location().beg_pos);
 
-    ExpressionClassifier pattern_classifier(this);
-    Expression* pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
-    ValidateBindingPattern(&pattern_classifier, CHECK_OK);
-
-    const AstRawString* name = ast_value_factory()->dot_catch_string();
-    bool is_simple = pattern->IsVariableProxy();
-    if (is_simple) {
-      auto proxy = pattern->AsVariableProxy();
-      scope_->RemoveUnresolved(proxy);
-      name = proxy->raw_name();
-    }
-
-    catch_variable = catch_scope->DeclareLocal(name, VAR, kCreatedInitialized,
-                                               Variable::NORMAL);
-
-    Expect(Token::RPAREN, CHECK_OK);
-
     {
       CollectExpressionsInTailPositionToListScope
           collect_tail_call_expressions_scope(
               function_state_, &tail_call_expressions_in_catch_block);
       BlockState block_state(&scope_, catch_scope);
 
-      // TODO(adamk): Make a version of ParseBlock that takes a scope and
-      // a block.
       catch_block =
           factory()->NewBlock(nullptr, 16, false, RelocInfo::kNoPosition);
-      Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
 
+      // Create a block scope to hold any lexical declarations created
+      // as part of destructuring the catch parameter.
+      Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
       block_scope->set_start_position(scanner()->location().beg_pos);
       {
         BlockState block_state(&scope_, block_scope);
         Target target(&this->target_stack_, catch_block);
 
+        ExpressionClassifier pattern_classifier(this);
+        Expression* pattern =
+            ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
+        ValidateBindingPattern(&pattern_classifier, CHECK_OK);
+
+        const AstRawString* name = ast_value_factory()->dot_catch_string();
+        bool is_simple = pattern->IsVariableProxy();
+        if (is_simple) {
+          auto proxy = pattern->AsVariableProxy();
+          scope_->RemoveUnresolved(proxy);
+          name = proxy->raw_name();
+        }
+        catch_variable = catch_scope->DeclareLocal(
+            name, VAR, kCreatedInitialized, Variable::NORMAL);
+
+        Expect(Token::RPAREN, CHECK_OK);
+
         if (!is_simple) {
           DeclarationDescriptor descriptor;
           descriptor.declaration_kind = DeclarationDescriptor::NORMAL;
@@ -3028,6 +3056,8 @@
           catch_block->statements()->Add(init_block, zone());
         }
 
+        // TODO(adamk): This should call ParseBlock in order to properly
+        // add an additional block scope for the catch body.
         Expect(Token::LBRACE, CHECK_OK);
         while (peek() != Token::RBRACE) {
           Statement* stat = ParseStatementListItem(CHECK_OK);
@@ -4705,7 +4735,7 @@
       // We produce:
       //
       // try { InitialYield; ...body...; return {value: undefined, done: true} }
-      // finally { %GeneratorClose(generator) }
+      // finally { %_GeneratorClose(generator) }
       //
       // - InitialYield yields the actual generator object.
       // - Any return statement inside the body will have its argument wrapped
@@ -4724,8 +4754,11 @@
             Token::INIT, init_proxy, allocation, RelocInfo::kNoPosition);
         VariableProxy* get_proxy = factory()->NewVariableProxy(
             function_state_->generator_object_variable());
-        Yield* yield =
-            factory()->NewYield(get_proxy, assignment, RelocInfo::kNoPosition);
+        // The position of the yield is important for reporting the exception
+        // caused by calling the .throw method on a generator suspended at the
+        // initial yield (i.e. right after generator instantiation).
+        Yield* yield = factory()->NewYield(get_proxy, assignment,
+                                           scope_->start_position());
         try_block->statements()->Add(
             factory()->NewExpressionStatement(yield, RelocInfo::kNoPosition),
             zone());
@@ -4745,7 +4778,7 @@
           function_state_->generator_object_variable());
       args->Add(call_proxy, zone());
       Expression* call = factory()->NewCallRuntime(
-          Runtime::kGeneratorClose, args, RelocInfo::kNoPosition);
+          Runtime::kInlineGeneratorClose, args, RelocInfo::kNoPosition);
       finally_block->statements()->Add(
           factory()->NewExpressionStatement(call, RelocInfo::kNoPosition),
           zone());
@@ -4844,7 +4877,6 @@
     SET_ALLOW(natives);
     SET_ALLOW(harmony_do_expressions);
     SET_ALLOW(harmony_for_in);
-    SET_ALLOW(harmony_function_name);
     SET_ALLOW(harmony_function_sent);
     SET_ALLOW(harmony_exponentiation_operator);
     SET_ALLOW(harmony_restrictive_declarations);
@@ -4943,8 +4975,7 @@
 
     if (fni_ != NULL) fni_->Infer();
 
-    if (allow_harmony_function_name() &&
-        property_name != ast_value_factory()->constructor_string()) {
+    if (property_name != ast_value_factory()->constructor_string()) {
       SetFunctionNameFromPropertyName(property, property_name);
     }
   }
@@ -4953,7 +4984,7 @@
   int end_pos = scanner()->location().end_pos;
 
   if (constructor == NULL) {
-    constructor = DefaultConstructor(name, extends != NULL, block_scope, pos,
+    constructor = DefaultConstructor(name, has_extends, block_scope, pos,
                                      end_pos, block_scope->language_mode());
   }
 
@@ -5189,7 +5220,7 @@
   // Move statistics to Isolate.
   for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
        ++feature) {
-    for (int i = 0; i < use_counts_[feature]; ++i) {
+    if (use_counts_[feature] > 0) {
       isolate->CountUsage(v8::Isolate::UseCounterFeature(feature));
     }
   }
@@ -5586,7 +5617,8 @@
   parser_->RewriteNonPattern(classifier, ok);
 }
 
-Expression* ParserTraits::RewriteAwaitExpression(Expression* value, int pos) {
+Expression* ParserTraits::RewriteAwaitExpression(Expression* value,
+                                                 int await_pos) {
   // yield %AsyncFunctionAwait(.generator_object, <operand>)
   Variable* generator_object_variable =
       parser_->function_state_->generator_object_variable();
@@ -5594,33 +5626,56 @@
   // If generator_object_variable is null,
   if (!generator_object_variable) return value;
 
-  Expression* generator_object =
-      parser_->factory()->NewVariableProxy(generator_object_variable);
+  auto factory = parser_->factory();
+  const int nopos = RelocInfo::kNoPosition;
+
+  Variable* temp_var = parser_->scope_->NewTemporary(
+      parser_->ast_value_factory()->empty_string());
+  VariableProxy* temp_proxy = factory->NewVariableProxy(temp_var);
+  Block* do_block = factory->NewBlock(nullptr, 2, false, nopos);
+
+  // Wrap value evaluation to provide a break location.
+  Expression* value_assignment =
+      factory->NewAssignment(Token::ASSIGN, temp_proxy, value, nopos);
+  do_block->statements()->Add(
+      factory->NewExpressionStatement(value_assignment, value->position()),
+      zone());
 
   ZoneList<Expression*>* async_function_await_args =
       new (zone()) ZoneList<Expression*>(2, zone());
+  Expression* generator_object =
+      factory->NewVariableProxy(generator_object_variable);
   async_function_await_args->Add(generator_object, zone());
-  async_function_await_args->Add(value, zone());
+  async_function_await_args->Add(temp_proxy, zone());
   Expression* async_function_await = parser_->factory()->NewCallRuntime(
-      Context::ASYNC_FUNCTION_AWAIT_INDEX, async_function_await_args,
-      RelocInfo::kNoPosition);
+      Context::ASYNC_FUNCTION_AWAIT_INDEX, async_function_await_args, nopos);
+  // Wrap await to provide a break location between value evaluation and yield.
+  Expression* await_assignment = factory->NewAssignment(
+      Token::ASSIGN, temp_proxy, async_function_await, nopos);
+  do_block->statements()->Add(
+      factory->NewExpressionStatement(await_assignment, await_pos), zone());
+  Expression* do_expr = factory->NewDoExpression(do_block, temp_var, nopos);
 
-  generator_object =
-      parser_->factory()->NewVariableProxy(generator_object_variable);
-  return parser_->factory()->NewYield(generator_object, async_function_await,
-                                      pos);
+  generator_object = factory->NewVariableProxy(generator_object_variable);
+  return factory->NewYield(generator_object, do_expr, nopos);
 }
 
-Zone* ParserTraits::zone() const {
-  return parser_->function_state_->scope()->zone();
-}
-
-
 ZoneList<Expression*>* ParserTraits::GetNonPatternList() const {
   return parser_->function_state_->non_patterns_to_rewrite();
 }
 
 
+ZoneList<typename ParserTraits::Type::ExpressionClassifier::Error>*
+ParserTraits::GetReportedErrorList() const {
+  return parser_->function_state_->GetReportedErrorList();
+}
+
+
+Zone* ParserTraits::zone() const {
+  return parser_->function_state_->scope()->zone();
+}
+
+
 class NonPatternRewriter : public AstExpressionRewriter {
  public:
   NonPatternRewriter(uintptr_t stack_limit, Parser* parser)
@@ -5834,9 +5889,9 @@
 }
 
 
-void ParserTraits::QueueNonPatternForRewriting(Expression* expr) {
+void ParserTraits::QueueNonPatternForRewriting(Expression* expr, bool* ok) {
   DCHECK(expr->IsRewritableExpression());
-  parser_->function_state_->AddNonPatternForRewriting(expr);
+  parser_->function_state_->AddNonPatternForRewriting(expr, ok);
 }
 
 
@@ -6529,8 +6584,6 @@
 void ParserTraits::FinalizeIteratorUse(Variable* completion,
                                        Expression* condition, Variable* iter,
                                        Block* iterator_use, Block* target) {
-  if (!FLAG_harmony_iterator_close) return;
-
   //
   // This function adds two statements to [target], corresponding to the
   // following code:
@@ -6813,8 +6866,6 @@
 
 
 Statement* ParserTraits::FinalizeForOfStatement(ForOfStatement* loop, int pos) {
-  if (!FLAG_harmony_iterator_close) return loop;
-
   //
   // This function replaces the loop with the following wrapping:
   //
diff --git a/src/parsing/parser.h b/src/parsing/parser.h
index 174b983..472dab9 100644
--- a/src/parsing/parser.h
+++ b/src/parsing/parser.h
@@ -658,7 +658,7 @@
 
   V8_INLINE void QueueDestructuringAssignmentForRewriting(
       Expression* assignment);
-  V8_INLINE void QueueNonPatternForRewriting(Expression* expr);
+  V8_INLINE void QueueNonPatternForRewriting(Expression* expr, bool* ok);
 
   void SetFunctionNameFromPropertyName(ObjectLiteralProperty* property,
                                        const AstRawString* name);
@@ -670,6 +670,8 @@
   V8_INLINE void RewriteNonPattern(Type::ExpressionClassifier* classifier,
                                    bool* ok);
 
+  V8_INLINE ZoneList<typename Type::ExpressionClassifier::Error>*
+      GetReportedErrorList() const;
   V8_INLINE Zone* zone() const;
 
   V8_INLINE ZoneList<Expression*>* GetNonPatternList() const;
diff --git a/src/parsing/pattern-rewriter.cc b/src/parsing/pattern-rewriter.cc
index 3dcff98..970231b 100644
--- a/src/parsing/pattern-rewriter.cc
+++ b/src/parsing/pattern-rewriter.cc
@@ -461,9 +461,7 @@
   // wrap this new block in a try-finally statement, restore block_ to its
   // original value, and add the try-finally statement to block_.
   auto target = block_;
-  if (FLAG_harmony_iterator_close) {
-    block_ = factory()->NewBlock(nullptr, 8, true, nopos);
-  }
+  block_ = factory()->NewBlock(nullptr, 8, true, nopos);
 
   Spread* spread = nullptr;
   for (Expression* value : *node->values()) {
@@ -551,7 +549,7 @@
     block_->statements()->Add(if_not_done, zone());
 
     if (!(value->IsLiteral() && value->AsLiteral()->raw_value()->IsTheHole())) {
-      if (FLAG_harmony_iterator_close) {
+      {
         // completion = kAbruptCompletion;
         Expression* proxy = factory()->NewVariableProxy(completion);
         Expression* assignment = factory()->NewAssignment(
@@ -563,7 +561,7 @@
 
       RecurseIntoSubpattern(value, factory()->NewVariableProxy(v));
 
-      if (FLAG_harmony_iterator_close) {
+      {
         // completion = kNormalCompletion;
         Expression* proxy = factory()->NewVariableProxy(completion);
         Expression* assignment = factory()->NewAssignment(
@@ -676,13 +674,11 @@
                           factory()->NewVariableProxy(array));
   }
 
-  if (FLAG_harmony_iterator_close) {
-    Expression* closing_condition = factory()->NewUnaryOperation(
-        Token::NOT, factory()->NewVariableProxy(done), nopos);
-    parser_->FinalizeIteratorUse(completion, closing_condition, iterator,
-                                 block_, target);
-    block_ = target;
-  }
+  Expression* closing_condition = factory()->NewUnaryOperation(
+      Token::NOT, factory()->NewVariableProxy(done), nopos);
+  parser_->FinalizeIteratorUse(completion, closing_condition, iterator, block_,
+                               target);
+  block_ = target;
 }
 
 
diff --git a/src/parsing/preparse-data.cc b/src/parsing/preparse-data.cc
index d02cd63..e1ef74c 100644
--- a/src/parsing/preparse-data.cc
+++ b/src/parsing/preparse-data.cc
@@ -2,11 +2,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/parsing/preparse-data.h"
+#include "src/base/hashmap.h"
 #include "src/base/logging.h"
 #include "src/globals.h"
-#include "src/hashmap.h"
 #include "src/parsing/parser.h"
-#include "src/parsing/preparse-data.h"
 #include "src/parsing/preparse-data-format.h"
 
 namespace v8 {
diff --git a/src/parsing/preparse-data.h b/src/parsing/preparse-data.h
index 1c99450..ddc4d03 100644
--- a/src/parsing/preparse-data.h
+++ b/src/parsing/preparse-data.h
@@ -6,8 +6,8 @@
 #define V8_PARSING_PREPARSE_DATA_H_
 
 #include "src/allocation.h"
+#include "src/base/hashmap.h"
 #include "src/collector.h"
-#include "src/hashmap.h"
 #include "src/messages.h"
 #include "src/parsing/preparse-data-format.h"
 
diff --git a/src/parsing/preparser.cc b/src/parsing/preparser.cc
index 0a091c6..08d5eaf 100644
--- a/src/parsing/preparser.cc
+++ b/src/parsing/preparser.cc
@@ -9,7 +9,6 @@
 #include "src/conversions-inl.h"
 #include "src/conversions.h"
 #include "src/globals.h"
-#include "src/hashmap.h"
 #include "src/list.h"
 #include "src/parsing/parser-base.h"
 #include "src/parsing/preparse-data-format.h"
diff --git a/src/parsing/preparser.h b/src/parsing/preparser.h
index 16eeab4..8eb95e7 100644
--- a/src/parsing/preparser.h
+++ b/src/parsing/preparser.h
@@ -7,7 +7,7 @@
 
 #include "src/ast/scopes.h"
 #include "src/bailout-reason.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
 #include "src/messages.h"
 #include "src/parsing/expression-classifier.h"
 #include "src/parsing/func-name-inferrer.h"
@@ -974,7 +974,7 @@
   }
 
   inline void QueueDestructuringAssignmentForRewriting(PreParserExpression) {}
-  inline void QueueNonPatternForRewriting(PreParserExpression) {}
+  inline void QueueNonPatternForRewriting(PreParserExpression, bool* ok) {}
 
   void SetFunctionNameFromPropertyName(PreParserExpression,
                                        PreParserIdentifier) {}
@@ -987,6 +987,8 @@
   inline PreParserExpression RewriteAwaitExpression(PreParserExpression value,
                                                     int pos);
 
+  V8_INLINE ZoneList<typename Type::ExpressionClassifier::Error>*
+      GetReportedErrorList() const;
   V8_INLINE Zone* zone() const;
   V8_INLINE ZoneList<PreParserExpression>* GetNonPatternList() const;
 
@@ -1214,13 +1216,19 @@
   return value;
 }
 
-Zone* PreParserTraits::zone() const {
-  return pre_parser_->function_state_->scope()->zone();
+ZoneList<PreParserExpression>* PreParserTraits::GetNonPatternList() const {
+  return pre_parser_->function_state_->non_patterns_to_rewrite();
 }
 
 
-ZoneList<PreParserExpression>* PreParserTraits::GetNonPatternList() const {
-  return pre_parser_->function_state_->non_patterns_to_rewrite();
+ZoneList<typename PreParserTraits::Type::ExpressionClassifier::Error>*
+PreParserTraits::GetReportedErrorList() const {
+  return pre_parser_->function_state_->GetReportedErrorList();
+}
+
+
+Zone* PreParserTraits::zone() const {
+  return pre_parser_->function_state_->scope()->zone();
 }
 
 
diff --git a/src/parsing/scanner.cc b/src/parsing/scanner.cc
index 6a9b32e..5fc848f 100644
--- a/src/parsing/scanner.cc
+++ b/src/parsing/scanner.cc
@@ -839,9 +839,6 @@
 }
 
 
-const int kMaxAscii = 127;
-
-
 Token::Value Scanner::ScanString() {
   uc32 quote = c0_;
   Advance<false, false>();  // consume quote
@@ -858,7 +855,7 @@
       Advance<false, false>();
       return Token::STRING;
     }
-    uc32 c = c0_;
+    char c = static_cast<char>(c0_);
     if (c == '\\') break;
     Advance<false, false>();
     AddLiteralChar(c);
@@ -1283,7 +1280,7 @@
   LiteralScope literal(this);
   if (IsInRange(c0_, 'a', 'z')) {
     do {
-      uc32 first_char = c0_;
+      char first_char = static_cast<char>(c0_);
       Advance<false, false>();
       AddLiteralChar(first_char);
     } while (IsInRange(c0_, 'a', 'z'));
@@ -1291,11 +1288,11 @@
     if (IsDecimalDigit(c0_) || IsInRange(c0_, 'A', 'Z') || c0_ == '_' ||
         c0_ == '$') {
       // Identifier starting with lowercase.
-      uc32 first_char = c0_;
+      char first_char = static_cast<char>(c0_);
       Advance<false, false>();
       AddLiteralChar(first_char);
       while (IsAsciiIdentifier(c0_)) {
-        uc32 first_char = c0_;
+        char first_char = static_cast<char>(c0_);
         Advance<false, false>();
         AddLiteralChar(first_char);
       }
@@ -1313,7 +1310,7 @@
     HandleLeadSurrogate();
   } else if (IsInRange(c0_, 'A', 'Z') || c0_ == '_' || c0_ == '$') {
     do {
-      uc32 first_char = c0_;
+      char first_char = static_cast<char>(c0_);
       Advance<false, false>();
       AddLiteralChar(first_char);
     } while (IsAsciiIdentifier(c0_));
@@ -1456,7 +1453,6 @@
         flag = RegExp::kMultiline;
         break;
       case 'u':
-        if (!FLAG_harmony_unicode_regexps) return Nothing<RegExp::Flags>();
         flag = RegExp::kUnicode;
         break;
       case 'y':
@@ -1590,7 +1586,7 @@
                                int value) {
   uint32_t hash = Hash(key, is_one_byte);
   byte* encoding = BackupKey(key, is_one_byte);
-  HashMap::Entry* entry = map_.LookupOrInsert(encoding, hash);
+  base::HashMap::Entry* entry = map_.LookupOrInsert(encoding, hash);
   int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
   entry->value =
     reinterpret_cast<void*>(static_cast<intptr_t>(value | old_value));
diff --git a/src/parsing/scanner.h b/src/parsing/scanner.h
index 0acc7ab..610091c 100644
--- a/src/parsing/scanner.h
+++ b/src/parsing/scanner.h
@@ -8,16 +8,16 @@
 #define V8_PARSING_SCANNER_H_
 
 #include "src/allocation.h"
+#include "src/base/hashmap.h"
 #include "src/base/logging.h"
 #include "src/char-predicates.h"
 #include "src/collector.h"
 #include "src/globals.h"
-#include "src/hashmap.h"
 #include "src/list.h"
 #include "src/messages.h"
 #include "src/parsing/token.h"
-#include "src/unicode.h"
 #include "src/unicode-decoder.h"
+#include "src/unicode.h"
 
 namespace v8 {
 namespace internal {
@@ -143,22 +143,32 @@
   UnicodeCache* unicode_constants_;
   // Backing store used to store strings used as hashmap keys.
   SequenceCollector<unsigned char> backing_store_;
-  HashMap map_;
+  base::HashMap map_;
   // Buffer used for string->number->canonical string conversions.
   char number_buffer_[kBufferSize];
 };
 
-
 // ----------------------------------------------------------------------------
 // LiteralBuffer -  Collector of chars of literals.
 
+const int kMaxAscii = 127;
+
 class LiteralBuffer {
  public:
   LiteralBuffer() : is_one_byte_(true), position_(0), backing_store_() { }
 
   ~LiteralBuffer() { backing_store_.Dispose(); }
 
-  INLINE(void AddChar(uint32_t code_unit)) {
+  INLINE(void AddChar(char code_unit)) {
+    if (position_ >= backing_store_.length()) ExpandBuffer();
+    DCHECK(is_one_byte_);
+    DCHECK(0 <= code_unit && code_unit <= kMaxAscii);
+    backing_store_[position_] = static_cast<byte>(code_unit);
+    position_ += kOneByteSize;
+    return;
+  }
+
+  INLINE(void AddChar(uc32 code_unit)) {
     if (position_ >= backing_store_.length()) ExpandBuffer();
     if (is_one_byte_) {
       if (code_unit <= unibrow::Latin1::kMaxChar) {
@@ -557,6 +567,11 @@
     next_.literal_chars->AddChar(c);
   }
 
+  INLINE(void AddLiteralChar(char c)) {
+    DCHECK_NOT_NULL(next_.literal_chars);
+    next_.literal_chars->AddChar(c);
+  }
+
   INLINE(void AddRawLiteralChar(uc32 c)) {
     DCHECK_NOT_NULL(next_.raw_literal_chars);
     next_.raw_literal_chars->AddChar(c);
diff --git a/src/perf-jit.cc b/src/perf-jit.cc
index 6f35514..df251fd 100644
--- a/src/perf-jit.cc
+++ b/src/perf-jit.cc
@@ -28,6 +28,7 @@
 #include "src/perf-jit.h"
 
 #include "src/assembler.h"
+#include "src/eh-frame.h"
 #include "src/objects-inl.h"
 
 #if V8_OS_LINUX
@@ -56,7 +57,13 @@
 };
 
 struct PerfJitBase {
-  enum PerfJitEvent { kLoad = 0, kMove = 1, kDebugInfo = 2, kClose = 3 };
+  enum PerfJitEvent {
+    kLoad = 0,
+    kMove = 1,
+    kDebugInfo = 2,
+    kClose = 3,
+    kUnwindingInfo = 4
+  };
 
   uint32_t event_;
   uint32_t size_;
@@ -85,6 +92,13 @@
   // Followed by entry_count_ instances of PerfJitDebugEntry.
 };
 
+struct PerfJitCodeUnwindingInfo : PerfJitBase {
+  uint64_t unwinding_size_;
+  uint64_t eh_frame_hdr_size_;
+  uint64_t mapped_size_;
+  // Followed by size_ - sizeof(PerfJitCodeUnwindingInfo) bytes of data.
+};
+
 const char PerfJitLogger::kFilenameFormatString[] = "./jit-%d.dump";
 
 // Extra padding for the PID in the filename
@@ -204,6 +218,9 @@
   uint32_t code_size = code->is_crankshafted() ? code->safepoint_table_offset()
                                                : code->instruction_size();
 
+  // Unwinding info comes right after debug info.
+  if (FLAG_perf_prof_unwinding_info) LogWriteUnwindingInfo(code);
+
   static const char string_terminator[] = "\0";
 
   PerfJitCodeLoad code_load;
@@ -303,6 +320,46 @@
   LogWriteBytes(padding_bytes, padding);
 }
 
+void PerfJitLogger::LogWriteUnwindingInfo(Code* code) {
+  EhFrameHdr eh_frame_hdr(code);
+
+  PerfJitCodeUnwindingInfo unwinding_info_header;
+  unwinding_info_header.event_ = PerfJitCodeLoad::kUnwindingInfo;
+  unwinding_info_header.time_stamp_ = GetTimestamp();
+  unwinding_info_header.eh_frame_hdr_size_ = EhFrameHdr::kRecordSize;
+
+  if (code->has_unwinding_info()) {
+    unwinding_info_header.unwinding_size_ = code->unwinding_info_size();
+    unwinding_info_header.mapped_size_ = unwinding_info_header.unwinding_size_;
+  } else {
+    unwinding_info_header.unwinding_size_ = EhFrameHdr::kRecordSize;
+    unwinding_info_header.mapped_size_ = 0;
+  }
+
+  int content_size = static_cast<int>(sizeof(unwinding_info_header) +
+                                      unwinding_info_header.unwinding_size_);
+  int padding_size = RoundUp(content_size, 8) - content_size;
+  unwinding_info_header.size_ = content_size + padding_size;
+
+  LogWriteBytes(reinterpret_cast<const char*>(&unwinding_info_header),
+                sizeof(unwinding_info_header));
+
+  if (code->has_unwinding_info()) {
+    // The last EhFrameHdr::kRecordSize bytes were a placeholder for the header.
+    // Discard them and write the actual eh_frame_hdr (below).
+    DCHECK_GE(code->unwinding_info_size(), EhFrameHdr::kRecordSize);
+    LogWriteBytes(reinterpret_cast<const char*>(code->unwinding_info_start()),
+                  code->unwinding_info_size() - EhFrameHdr::kRecordSize);
+  }
+
+  LogWriteBytes(reinterpret_cast<const char*>(&eh_frame_hdr),
+                EhFrameHdr::kRecordSize);
+
+  char padding_bytes[] = "\0\0\0\0\0\0\0\0";
+  DCHECK_LT(padding_size, sizeof(padding_bytes));
+  LogWriteBytes(padding_bytes, padding_size);
+}
+
 void PerfJitLogger::CodeMoveEvent(AbstractCode* from, Address to) {
   // Code relocation not supported.
   UNREACHABLE();
diff --git a/src/perf-jit.h b/src/perf-jit.h
index 25cc3b3..6efa4bb 100644
--- a/src/perf-jit.h
+++ b/src/perf-jit.h
@@ -66,11 +66,13 @@
   void LogWriteBytes(const char* bytes, int size);
   void LogWriteHeader();
   void LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared);
+  void LogWriteUnwindingInfo(Code* code);
 
   static const uint32_t kElfMachIA32 = 3;
   static const uint32_t kElfMachX64 = 62;
   static const uint32_t kElfMachARM = 40;
   static const uint32_t kElfMachMIPS = 10;
+  static const uint32_t kElfMachARM64 = 183;
 
   uint32_t GetElfMach() {
 #if V8_TARGET_ARCH_IA32
@@ -81,6 +83,8 @@
     return kElfMachARM;
 #elif V8_TARGET_ARCH_MIPS
     return kElfMachMIPS;
+#elif V8_TARGET_ARCH_ARM64
+    return kElfMachARM64;
 #else
     UNIMPLEMENTED();
     return 0;
diff --git a/src/ppc/OWNERS b/src/ppc/OWNERS
index eb007cb..752e8e3 100644
--- a/src/ppc/OWNERS
+++ b/src/ppc/OWNERS
@@ -3,3 +3,4 @@
 joransiu@ca.ibm.com
 mbrandy@us.ibm.com
 michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/src/ppc/assembler-ppc.cc b/src/ppc/assembler-ppc.cc
index bf59955..2ce601e 100644
--- a/src/ppc/assembler-ppc.cc
+++ b/src/ppc/assembler-ppc.cc
@@ -166,31 +166,21 @@
      reinterpret_cast<intptr_t>(Assembler::target_address_at(pc_, host_)));
 }
 
-void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
-    ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
-  if (IsWasmMemoryReference(rmode_)) {
-    Address updated_memory_reference;
-    DCHECK(old_base <= wasm_memory_reference() &&
-           wasm_memory_reference() < old_base + old_size);
-    updated_memory_reference = new_base + (wasm_memory_reference() - old_base);
-    DCHECK(new_base <= updated_memory_reference &&
-           updated_memory_reference < new_base + new_size);
-    Assembler::set_target_address_at(
-        isolate_, pc_, host_, updated_memory_reference, icache_flush_mode);
-  } else if (IsWasmMemorySizeReference(rmode_)) {
-    uint32_t updated_size_reference;
-    DCHECK(wasm_memory_size_reference() <= old_size);
-    updated_size_reference =
-        new_size + (wasm_memory_size_reference() - old_size);
-    DCHECK(updated_size_reference <= new_size);
-    Assembler::set_target_address_at(
-        isolate_, pc_, host_, reinterpret_cast<Address>(updated_size_reference),
-        icache_flush_mode);
-  } else {
-    UNREACHABLE();
-  }
+Address RelocInfo::wasm_global_reference() {
+  DCHECK(IsWasmGlobalReference(rmode_));
+  return Assembler::target_address_at(pc_, host_);
+}
+
+
+void RelocInfo::unchecked_update_wasm_memory_reference(
+    Address address, ICacheFlushMode flush_mode) {
+  Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
+}
+
+void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
+                                                  ICacheFlushMode flush_mode) {
+  Assembler::set_target_address_at(isolate_, pc_, host_,
+                                   reinterpret_cast<Address>(size), flush_mode);
 }
 
 // -----------------------------------------------------------------------------
@@ -267,6 +257,8 @@
   desc->constant_pool_size =
       (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
   desc->origin = this;
+  desc->unwinding_info_size = 0;
+  desc->unwinding_info = nullptr;
 }
 
 
@@ -715,13 +707,11 @@
 
 
 void Assembler::bclr(BOfield bo, int condition_bit, LKBit lk) {
-  positions_recorder()->WriteRecordedPositions();
   emit(EXT1 | bo | condition_bit * B16 | BCLRX | lk);
 }
 
 
 void Assembler::bcctr(BOfield bo, int condition_bit, LKBit lk) {
-  positions_recorder()->WriteRecordedPositions();
   emit(EXT1 | bo | condition_bit * B16 | BCCTRX | lk);
 }
 
@@ -738,9 +728,6 @@
 
 
 void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
-  if (lk == SetLK) {
-    positions_recorder()->WriteRecordedPositions();
-  }
   int imm16 = branch_offset;
   CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
   emit(BCX | bo | condition_bit * B16 | (imm16 & kImm16Mask) | lk);
@@ -748,9 +735,6 @@
 
 
 void Assembler::b(int branch_offset, LKBit lk) {
-  if (lk == SetLK) {
-    positions_recorder()->WriteRecordedPositions();
-  }
   int imm26 = branch_offset;
   CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
   emit(BX | (imm26 & kImm26Mask) | lk);
diff --git a/src/ppc/assembler-ppc.h b/src/ppc/assembler-ppc.h
index a9cf730..24166e3 100644
--- a/src/ppc/assembler-ppc.h
+++ b/src/ppc/assembler-ppc.h
@@ -166,8 +166,6 @@
     Register r = {code};
     return r;
   }
-  const char* ToString();
-  bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
   bool is(Register reg) const { return reg_code == reg.reg_code; }
   int code() const {
@@ -206,6 +204,8 @@
 const Register kRootRegister = r29;          // Roots array pointer.
 const Register cp = r30;                     // JavaScript context pointer.
 
+static const bool kSimpleFPAliasing = true;
+
 // Double word FP register.
 struct DoubleRegister {
   enum Code {
@@ -219,8 +219,6 @@
   static const int kNumRegisters = Code::kAfterLast;
   static const int kMaxNumRegisters = kNumRegisters;
 
-  const char* ToString();
-  bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
   bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
   int code() const {
diff --git a/src/ppc/builtins-ppc.cc b/src/ppc/builtins-ppc.cc
index a6263cd..c3c2b84 100644
--- a/src/ppc/builtins-ppc.cc
+++ b/src/ppc/builtins-ppc.cc
@@ -17,8 +17,7 @@
 #define __ ACCESS_MASM(masm)
 
 
-void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
-                                BuiltinExtraArguments extra_args) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
   // ----------- S t a t e -------------
   //  -- r3                 : number of arguments excluding receiver
   //  -- r4                 : target
@@ -37,23 +36,8 @@
   __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
 
   // Insert extra arguments.
-  int num_extra_args = 0;
-  switch (extra_args) {
-    case BuiltinExtraArguments::kTarget:
-      __ Push(r4);
-      ++num_extra_args;
-      break;
-    case BuiltinExtraArguments::kNewTarget:
-      __ Push(r6);
-      ++num_extra_args;
-      break;
-    case BuiltinExtraArguments::kTargetAndNewTarget:
-      __ Push(r4, r6);
-      num_extra_args += 2;
-      break;
-    case BuiltinExtraArguments::kNone:
-      break;
-  }
+  const int num_extra_args = 2;
+  __ Push(r4, r6);
 
   // JumpToExternalReference expects r3 to contain the number of arguments
   // including the receiver and the extra arguments.
@@ -139,6 +123,8 @@
 void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
   // ----------- S t a t e -------------
   //  -- r3                 : number of arguments
+  //  -- r4                 : function
+  //  -- cp                 : context
   //  -- lr                 : return address
   //  -- sp[(argc - n) * 8] : arg[n] (zero-based)
   //  -- sp[(argc + 1) * 8] : receiver
@@ -150,58 +136,69 @@
   DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1;
 
   // Load the accumulator with the default return value (either -Infinity or
-  // +Infinity), with the tagged value in r4 and the double value in d1.
-  __ LoadRoot(r4, root_index);
-  __ lfd(d1, FieldMemOperand(r4, HeapNumber::kValueOffset));
+  // +Infinity), with the tagged value in r8 and the double value in d1.
+  __ LoadRoot(r8, root_index);
+  __ lfd(d1, FieldMemOperand(r8, HeapNumber::kValueOffset));
 
   // Setup state for loop
   // r5: address of arg[0] + kPointerSize
   // r6: number of slots to drop at exit (arguments + receiver)
-  __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
-  __ add(r5, sp, r5);
-  __ addi(r6, r3, Operand(1));
+  __ addi(r7, r3, Operand(1));
 
   Label done_loop, loop;
   __ bind(&loop);
   {
     // Check if all parameters done.
-    __ cmpl(r5, sp);
-    __ ble(&done_loop);
+    __ subi(r3, r3, Operand(1));
+    __ cmpi(r3, Operand::Zero());
+    __ blt(&done_loop);
 
-    // Load the next parameter tagged value into r3.
-    __ LoadPU(r3, MemOperand(r5, -kPointerSize));
+    // Load the next parameter tagged value into r5.
+    __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
+    __ LoadPX(r5, MemOperand(sp, r5));
 
     // Load the double value of the parameter into d2, maybe converting the
-    // parameter to a number first using the ToNumberStub if necessary.
+    // parameter to a number first using the ToNumber builtin if necessary.
     Label convert, convert_smi, convert_number, done_convert;
     __ bind(&convert);
-    __ JumpIfSmi(r3, &convert_smi);
-    __ LoadP(r7, FieldMemOperand(r3, HeapObject::kMapOffset));
-    __ JumpIfRoot(r7, Heap::kHeapNumberMapRootIndex, &convert_number);
+    __ JumpIfSmi(r5, &convert_smi);
+    __ LoadP(r6, FieldMemOperand(r5, HeapObject::kMapOffset));
+    __ JumpIfRoot(r6, Heap::kHeapNumberMapRootIndex, &convert_number);
     {
-      // Parameter is not a Number, use the ToNumberStub to convert it.
-      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-      __ SmiTag(r6);
-      __ Push(r4, r5, r6);
-      ToNumberStub stub(masm->isolate());
-      __ CallStub(&stub);
-      __ Pop(r4, r5, r6);
-      __ SmiUntag(r6);
+      // Parameter is not a Number, use the ToNumber builtin to convert it.
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ PushStandardFrame(r4);
+      __ SmiTag(r3);
+      __ SmiTag(r7);
+      __ Push(r3, r7, r8);
+      __ mr(r3, r5);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+      __ mr(r5, r3);
+      __ Pop(r3, r7, r8);
       {
         // Restore the double accumulator value (d1).
         Label done_restore;
-        __ SmiToDouble(d1, r4);
-        __ JumpIfSmi(r4, &done_restore);
-        __ lfd(d1, FieldMemOperand(r4, HeapNumber::kValueOffset));
+        __ SmiToDouble(d1, r8);
+        __ JumpIfSmi(r8, &done_restore);
+        __ lfd(d1, FieldMemOperand(r8, HeapNumber::kValueOffset));
         __ bind(&done_restore);
       }
+      __ SmiUntag(r7);
+      __ SmiUntag(r3);
+      // TODO(Jaideep): Add macro function for PopStandardFrame
+      if (FLAG_enable_embedded_constant_pool) {
+        __ Pop(r0, fp, kConstantPoolRegister, cp, r4);
+      } else {
+        __ Pop(r0, fp, cp, r4);
+      }
+      __ mtlr(r0);
     }
     __ b(&convert);
     __ bind(&convert_number);
-    __ lfd(d2, FieldMemOperand(r3, HeapNumber::kValueOffset));
+    __ lfd(d2, FieldMemOperand(r5, HeapNumber::kValueOffset));
     __ b(&done_convert);
     __ bind(&convert_smi);
-    __ SmiToDouble(d2, r3);
+    __ SmiToDouble(d2, r5);
     __ bind(&done_convert);
 
     // Perform the actual comparison with the accumulator value on the left hand
@@ -213,26 +210,26 @@
     __ b(CommuteCondition(cond_done), &compare_swap);
 
     // Left and right hand side are equal, check for -0 vs. +0.
-    __ TestDoubleIsMinusZero(reg, r7, r8);
+    __ TestDoubleIsMinusZero(reg, r9, r0);
     __ bne(&loop);
 
     // Update accumulator. Result is on the right hand side.
     __ bind(&compare_swap);
     __ fmr(d1, d2);
-    __ mr(r4, r3);
+    __ mr(r8, r5);
     __ b(&loop);
 
     // At least one side is NaN, which means that the result will be NaN too.
     // We still need to visit the rest of the arguments.
     __ bind(&compare_nan);
-    __ LoadRoot(r4, Heap::kNanValueRootIndex);
-    __ lfd(d1, FieldMemOperand(r4, HeapNumber::kValueOffset));
+    __ LoadRoot(r8, Heap::kNanValueRootIndex);
+    __ lfd(d1, FieldMemOperand(r8, HeapNumber::kValueOffset));
     __ b(&loop);
   }
 
   __ bind(&done_loop);
-  __ mr(r3, r4);
-  __ Drop(r6);
+  __ mr(r3, r8);
+  __ Drop(r7);
   __ Ret();
 }
 
@@ -259,8 +256,7 @@
   }
 
   // 2a. Convert the first argument to a number.
-  ToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
+  __ Jump(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
 
   // 2b. No arguments, return +0.
   __ bind(&no_arguments);
@@ -310,8 +306,7 @@
       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
       __ Push(r4, r6);
       __ mr(r3, r5);
-      ToNumberStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
       __ mr(r5, r3);
       __ Pop(r4, r6);
     }
@@ -711,8 +706,9 @@
   __ AssertGeneratorObject(r4);
 
   // Store input value into generator object.
-  __ StoreP(r3, FieldMemOperand(r4, JSGeneratorObject::kInputOffset), r0);
-  __ RecordWriteField(r4, JSGeneratorObject::kInputOffset, r3, r6,
+  __ StoreP(r3, FieldMemOperand(r4, JSGeneratorObject::kInputOrDebugPosOffset),
+            r0);
+  __ RecordWriteField(r4, JSGeneratorObject::kInputOrDebugPosOffset, r3, r6,
                       kLRHasNotBeenSaved, kDontSaveFPRegs);
 
   // Store resume mode into generator object.
@@ -723,21 +719,27 @@
   __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
 
   // Flood function if we are stepping.
-  Label skip_flooding;
-  ExternalReference step_in_enabled =
-      ExternalReference::debug_step_in_enabled_address(masm->isolate());
-  __ mov(ip, Operand(step_in_enabled));
-  __ lbz(ip, MemOperand(ip));
-  __ cmpi(ip, Operand::Zero());
-  __ beq(&skip_flooding);
-  {
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ Push(r4, r5, r7);
-    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
-    __ Pop(r4, r5);
-    __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
-  }
-  __ bind(&skip_flooding);
+  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+  Label stepping_prepared;
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(masm->isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  __ mov(ip, Operand(last_step_action));
+  __ LoadByte(ip, MemOperand(ip), r0);
+  __ extsb(ip, ip);
+  __ cmpi(ip, Operand(StepIn));
+  __ bge(&prepare_step_in_if_stepping);
+
+  // Flood function if we need to continue stepping in the suspended generator.
+
+  ExternalReference debug_suspended_generator =
+      ExternalReference::debug_suspended_generator_address(masm->isolate());
+
+  __ mov(ip, Operand(debug_suspended_generator));
+  __ LoadP(ip, MemOperand(ip));
+  __ cmp(ip, r4);
+  __ beq(&prepare_step_in_suspended_generator);
+  __ bind(&stepping_prepared);
 
   // Push receiver.
   __ LoadP(ip, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
@@ -843,6 +845,26 @@
       __ Jump(r6);
     }
   }
+
+  __ bind(&prepare_step_in_if_stepping);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r4, r5, r7);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(r4, r5);
+    __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
+  }
+  __ b(&stepping_prepared);
+
+  __ bind(&prepare_step_in_suspended_generator);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r4, r5);
+    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+    __ Pop(r4, r5);
+    __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
+  }
+  __ b(&stepping_prepared);
 }
 
 void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
@@ -968,6 +990,20 @@
   Generate_JSEntryTrampolineHelper(masm, true);
 }
 
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
+  Register args_count = scratch;
+
+  // Get the arguments + receiver count.
+  __ LoadP(args_count,
+           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ lwz(args_count,
+         FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+  __ add(sp, sp, args_count);
+}
 
 // Generate code for entering a JS function with the interpreter.
 // On entry to the function the receiver and arguments have been pushed on the
@@ -1077,15 +1113,7 @@
   masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
 
   // The return value is in r3.
-
-  // Get the arguments + reciever count.
-  __ LoadP(r5, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ lwz(r5, FieldMemOperand(r5, BytecodeArray::kParameterSizeOffset));
-
-  // Leave the frame (also dropping the register file).
-  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
-
-  __ add(sp, sp, r5);
+  LeaveInterpreterFrame(masm, r5);
   __ blr();
 
   // If the bytecode array is no longer present, then the underlying function
@@ -1101,6 +1129,30 @@
   __ JumpToJSEntry(r7);
 }
 
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+  // Save the function and context for call to CompileBaseline.
+  __ LoadP(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+  __ LoadP(kContextRegister,
+           MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  // Leave the frame before recompiling for baseline so that we don't count as
+  // an activation on the stack.
+  LeaveInterpreterFrame(masm, r5);
+
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Push return value.
+    __ push(r3);
+
+    // Push function as argument and compile for baseline.
+    __ push(r4);
+    __ CallRuntime(Runtime::kCompileBaseline);
+
+    // Restore return value.
+    __ pop(r3);
+  }
+  __ blr();
+}
 
 static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
                                          Register count, Register scratch) {
@@ -1687,6 +1739,9 @@
 void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
                                                int field_index) {
   // ----------- S t a t e -------------
+  //  -- r3    : number of arguments
+  //  -- r4    : function
+  //  -- cp    : context
   //  -- lr    : return address
   //  -- sp[0] : receiver
   // -----------------------------------
@@ -1696,7 +1751,7 @@
   {
     __ Pop(r3);
     __ JumpIfSmi(r3, &receiver_not_date);
-    __ CompareObjectType(r3, r4, r5, JS_DATE_TYPE);
+    __ CompareObjectType(r3, r5, r6, JS_DATE_TYPE);
     __ bne(&receiver_not_date);
   }
 
@@ -1726,7 +1781,14 @@
 
   // 3. Raise a TypeError if the receiver is not a date.
   __ bind(&receiver_not_date);
-  __ TailCallRuntime(Runtime::kThrowNotDateError);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ push(r3);
+    __ PushStandardFrame(r4);
+    __ LoadSmiLiteral(r7, Smi::FromInt(0));
+    __ push(r7);
+    __ CallRuntime(Runtime::kThrowNotDateError);
+  }
 }
 
 // static
@@ -2696,6 +2758,76 @@
   __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
 }
 
+// static
+void Builtins::Generate_StringToNumber(MacroAssembler* masm) {
+  // The StringToNumber stub takes one argument in r3.
+  __ AssertString(r3);
+
+  // Check if string has a cached array index.
+  Label runtime;
+  __ lwz(r5, FieldMemOperand(r3, String::kHashFieldOffset));
+  __ And(r0, r5, Operand(String::kContainsCachedArrayIndexMask), SetRC);
+  __ bne(&runtime, cr0);
+  __ IndexFromHash(r5, r3);
+  __ blr();
+
+  __ bind(&runtime);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    // Push argument.
+    __ push(r3);
+    // We cannot use a tail call here because this builtin can also be called
+    // from wasm.
+    __ CallRuntime(Runtime::kStringToNumber);
+  }
+  __ Ret();
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in r3.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ TestIfSmi(r3, r0);
+  __ Ret(eq, cr0);
+
+  __ CompareObjectType(r3, r4, r4, HEAP_NUMBER_TYPE);
+  // r3: receiver
+  // r4: receiver instance type
+  __ Ret(eq);
+
+  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+          RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_NonNumberToNumber(MacroAssembler* masm) {
+  // The NonNumberToNumber stub takes one argument in r3.
+  __ AssertNotNumber(r3);
+
+  __ CompareObjectType(r3, r4, r4, FIRST_NONSTRING_TYPE);
+  // r3: receiver
+  // r4: receiver instance type
+  __ Jump(masm->isolate()->builtins()->StringToNumber(), RelocInfo::CODE_TARGET,
+          lt);
+
+  Label not_oddball;
+  __ cmpi(r4, Operand(ODDBALL_TYPE));
+  __ bne(&not_oddball);
+  __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
+  __ blr();
+  __ bind(&not_oddball);
+
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    // Push argument.
+    __ push(r3);
+    // We cannot use a tail call here because this builtin can also be called
+    // from wasm.
+    __ CallRuntime(Runtime::kToNumber);
+  }
+  __ Ret();
+}
+
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r3 : actual number of arguments
diff --git a/src/ppc/code-stubs-ppc.cc b/src/ppc/code-stubs-ppc.cc
index f0f74c3..6065d02 100644
--- a/src/ppc/code-stubs-ppc.cc
+++ b/src/ppc/code-stubs-ppc.cc
@@ -21,70 +21,28 @@
 namespace v8 {
 namespace internal {
 
+#define __ ACCESS_MASM(masm)
 
-static void InitializeArrayConstructorDescriptor(
-    Isolate* isolate, CodeStubDescriptor* descriptor,
-    int constant_stack_parameter_count) {
-  Address deopt_handler =
-      Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
-
-  if (constant_stack_parameter_count == 0) {
-    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  } else {
-    descriptor->Initialize(r3, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  }
+void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
+  __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
+  __ StorePX(r4, MemOperand(sp, r0));
+  __ push(r4);
+  __ push(r5);
+  __ addi(r3, r3, Operand(3));
+  __ TailCallRuntime(Runtime::kNewArray);
 }
 
-
-static void InitializeInternalArrayConstructorDescriptor(
-    Isolate* isolate, CodeStubDescriptor* descriptor,
-    int constant_stack_parameter_count) {
-  Address deopt_handler =
-      Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
-
-  if (constant_stack_parameter_count == 0) {
-    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  } else {
-    descriptor->Initialize(r3, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  }
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
   descriptor->Initialize(r3, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
 }
 
-void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+void FastFunctionBindStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
+  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
+  descriptor->Initialize(r3, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
 }
 
-
-void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                           Condition cond);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
@@ -988,7 +946,7 @@
   CEntryStub::GenerateAheadOfTime(isolate);
   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
-  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
   CreateWeakCellStub::GenerateAheadOfTime(isolate);
   BinaryOpICStub::GenerateAheadOfTime(isolate);
@@ -1407,7 +1365,6 @@
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
                                           &miss,  // When index out of range.
-                                          STRING_INDEX_IS_ARRAY_INDEX,
                                           RECEIVER_IS_STRING);
   char_at_generator.GenerateFast(masm);
   __ Ret();
@@ -1860,12 +1817,15 @@
   // r5 : feedback vector
   // r6 : slot in feedback vector (Smi)
   Label initialize, done, miss, megamorphic, not_array_function;
+  Label done_initialize_count, done_increment_count;
 
   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
   DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
             masm->isolate()->heap()->uninitialized_symbol());
 
+  const int count_offset = FixedArray::kHeaderSize + kPointerSize;
+
   // Load the cache state into r8.
   __ SmiToPtrArrayOffset(r8, r6);
   __ add(r8, r5, r8);
@@ -1880,7 +1840,7 @@
   Register weak_value = r10;
   __ LoadP(weak_value, FieldMemOperand(r8, WeakCell::kValueOffset));
   __ cmp(r4, weak_value);
-  __ beq(&done);
+  __ beq(&done_increment_count);
   __ CompareRoot(r8, Heap::kmegamorphic_symbolRootIndex);
   __ beq(&done);
   __ LoadP(feedback_map, FieldMemOperand(r8, HeapObject::kMapOffset));
@@ -1903,7 +1863,7 @@
   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
   __ cmp(r4, r8);
   __ bne(&megamorphic);
-  __ b(&done);
+  __ b(&done_increment_count);
 
   __ bind(&miss);
 
@@ -1933,12 +1893,31 @@
   // slot.
   CreateAllocationSiteStub create_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &create_stub);
-  __ b(&done);
+  __ b(&done_initialize_count);
 
   __ bind(&not_array_function);
 
   CreateWeakCellStub weak_cell_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &weak_cell_stub);
+
+  __ bind(&done_initialize_count);
+  // Initialize the call counter.
+  __ LoadSmiLiteral(r8, Smi::FromInt(1));
+  __ SmiToPtrArrayOffset(r7, r6);
+  __ add(r7, r5, r7);
+  __ StoreP(r8, FieldMemOperand(r7, count_offset), r0);
+  __ b(&done);
+
+  __ bind(&done_increment_count);
+
+  // Increment the call count for monomorphic function calls.
+  __ SmiToPtrArrayOffset(r8, r6);
+  __ add(r8, r5, r8);
+
+  __ LoadP(r7, FieldMemOperand(r8, count_offset));
+  __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
+  __ StoreP(r7, FieldMemOperand(r8, count_offset), r0);
+
   __ bind(&done);
 }
 
@@ -2008,7 +1987,7 @@
   __ SmiToPtrArrayOffset(r8, r6);
   __ add(r5, r5, r8);
   __ LoadP(r6, FieldMemOperand(r5, count_offset));
-  __ AddSmiLiteral(r6, r6, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
+  __ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
   __ StoreP(r6, FieldMemOperand(r5, count_offset), r0);
 
   __ mr(r5, r7);
@@ -2056,7 +2035,7 @@
   // Increment the call count for monomorphic function calls.
   const int count_offset = FixedArray::kHeaderSize + kPointerSize;
   __ LoadP(r6, FieldMemOperand(r9, count_offset));
-  __ AddSmiLiteral(r6, r6, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
+  __ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
   __ StoreP(r6, FieldMemOperand(r9, count_offset), r0);
 
   __ bind(&call_function);
@@ -2126,7 +2105,7 @@
   __ bne(&miss);
 
   // Initialize the call counter.
-  __ LoadSmiLiteral(r8, Smi::FromInt(CallICNexus::kCallCountIncrement));
+  __ LoadSmiLiteral(r8, Smi::FromInt(1));
   __ StoreP(r8, FieldMemOperand(r9, count_offset), r0);
 
   // Store the function. Use a stub since we need a frame for allocation.
@@ -2217,13 +2196,7 @@
     // index_ is consumed by runtime conversion function.
     __ Push(object_, index_);
   }
-  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
-    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
-  } else {
-    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
-    // NumberToSmi discards numbers that are not exact integers.
-    __ CallRuntime(Runtime::kNumberToSmi);
-  }
+  __ CallRuntime(Runtime::kNumberToSmi);
   // Save the conversion result before the pop instructions below
   // have a chance to overwrite it.
   __ Move(index_, r3);
@@ -2552,67 +2525,13 @@
   // r6: from index (untagged)
   __ SmiTag(r6, r6);
   StringCharAtGenerator generator(r3, r6, r5, r3, &runtime, &runtime, &runtime,
-                                  STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
+                                  RECEIVER_IS_STRING);
   generator.GenerateFast(masm);
   __ Drop(3);
   __ Ret();
   generator.SkipSlow(masm, &runtime);
 }
 
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in r3.
-  STATIC_ASSERT(kSmiTag == 0);
-  __ TestIfSmi(r3, r0);
-  __ Ret(eq, cr0);
-
-  __ CompareObjectType(r3, r4, r4, HEAP_NUMBER_TYPE);
-  // r3: receiver
-  // r4: receiver instance type
-  __ Ret(eq);
-
-  NonNumberToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-}
-
-void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
-  // The NonNumberToNumber stub takes one argument in r3.
-  __ AssertNotNumber(r3);
-
-  __ CompareObjectType(r3, r4, r4, FIRST_NONSTRING_TYPE);
-  // r3: receiver
-  // r4: receiver instance type
-  StringToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub, lt);
-
-  Label not_oddball;
-  __ cmpi(r4, Operand(ODDBALL_TYPE));
-  __ bne(&not_oddball);
-  __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
-  __ blr();
-  __ bind(&not_oddball);
-
-  __ push(r3);  // Push argument.
-  __ TailCallRuntime(Runtime::kToNumber);
-}
-
-void StringToNumberStub::Generate(MacroAssembler* masm) {
-  // The StringToNumber stub takes one argument in r3.
-  __ AssertString(r3);
-
-  // Check if string has a cached array index.
-  Label runtime;
-  __ lwz(r5, FieldMemOperand(r3, String::kHashFieldOffset));
-  __ And(r0, r5, Operand(String::kContainsCachedArrayIndexMask), SetRC);
-  __ bne(&runtime, cr0);
-  __ IndexFromHash(r5, r3);
-  __ blr();
-
-  __ bind(&runtime);
-  __ push(r3);  // Push argument.
-  __ TailCallRuntime(Runtime::kStringToNumber);
-}
-
 void ToStringStub::Generate(MacroAssembler* masm) {
   // The ToString stub takes one argument in r3.
   Label is_number;
@@ -2803,7 +2722,7 @@
   // Load r5 with the allocation site.  We stick an undefined dummy value here
   // and replace it with the real allocation site later when we instantiate this
   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
-  __ Move(r5, handle(isolate()->heap()->undefined_value()));
+  __ Move(r5, isolate()->factory()->undefined_value());
 
   // Make sure that we actually patched the allocation site.
   if (FLAG_debug_code) {
@@ -3677,14 +3596,14 @@
 
 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  LoadICStub stub(isolate(), state());
+  LoadICStub stub(isolate());
   stub.GenerateForTrampoline(masm);
 }
 
 
 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  KeyedLoadICStub stub(isolate(), state());
+  KeyedLoadICStub stub(isolate());
   stub.GenerateForTrampoline(masm);
 }
 
@@ -4339,18 +4258,11 @@
 }
 
 
-void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
       isolate);
-  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
-      isolate);
-  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
-      isolate);
-}
-
-
-void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
-    Isolate* isolate) {
+  ArrayNArgumentsConstructorStub stub(isolate);
+  stub.GetCode();
   ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
   for (int i = 0; i < 2; i++) {
     // For internal arrays we only need a few things
@@ -4358,8 +4270,6 @@
     stubh1.GetCode();
     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
     stubh2.GetCode();
-    InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
-    stubh3.GetCode();
   }
 }
 
@@ -4378,13 +4288,15 @@
     CreateArrayDispatchOneArgument(masm, mode);
 
     __ bind(&not_one_case);
-    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+    ArrayNArgumentsConstructorStub stub(masm->isolate());
+    __ TailCallStub(&stub);
   } else if (argument_count() == NONE) {
     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   } else if (argument_count() == ONE) {
     CreateArrayDispatchOneArgument(masm, mode);
   } else if (argument_count() == MORE_THAN_ONE) {
-    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+    ArrayNArgumentsConstructorStub stub(masm->isolate());
+    __ TailCallStub(&stub);
   } else {
     UNREACHABLE();
   }
@@ -4468,7 +4380,7 @@
   InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
   __ TailCallStub(&stub0, lt);
 
-  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+  ArrayNArgumentsConstructorStub stubN(isolate());
   __ TailCallStub(&stubN, gt);
 
   if (IsFastPackedElementsKind(kind)) {
@@ -4686,13 +4598,13 @@
   // specified by the function's internal formal parameter count.
   Label rest_parameters;
   __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
   __ LoadWordArith(
-      r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
+      r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
 #if V8_TARGET_ARCH_PPC64
-  __ SmiTag(r4);
+  __ SmiTag(r6);
 #endif
-  __ sub(r3, r3, r4, LeaveOE, SetRC);
+  __ sub(r3, r3, r6, LeaveOE, SetRC);
   __ bgt(&rest_parameters, cr0);
 
   // Return an empty rest parameter array.
@@ -4739,6 +4651,7 @@
     // ----------- S t a t e -------------
     //  -- cp : context
     //  -- r3 : number of rest parameters (tagged)
+    //  -- r4 : function
     //  -- r5 : pointer just past first rest parameters
     //  -- r9 : size of rest parameters
     //  -- lr : return address
@@ -4746,9 +4659,9 @@
 
     // Allocate space for the rest parameter array plus the backing store.
     Label allocate, done_allocate;
-    __ mov(r4, Operand(JSArray::kSize + FixedArray::kHeaderSize));
-    __ add(r4, r4, r9);
-    __ Allocate(r4, r6, r7, r8, &allocate, NO_ALLOCATION_FLAGS);
+    __ mov(r10, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+    __ add(r10, r10, r9);
+    __ Allocate(r10, r6, r7, r8, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Setup the elements array in r6.
@@ -4779,17 +4692,25 @@
     __ addi(r3, r7, Operand(kHeapObjectTag));
     __ Ret();
 
-    // Fall back to %AllocateInNewSpace.
+    // Fall back to %AllocateInNewSpace (if not too big).
+    Label too_big_for_new_space;
     __ bind(&allocate);
+    __ Cmpi(r10, Operand(Page::kMaxRegularHeapObjectSize), r0);
+    __ bgt(&too_big_for_new_space);
     {
       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-      __ SmiTag(r4);
-      __ Push(r3, r5, r4);
+      __ SmiTag(r10);
+      __ Push(r3, r5, r10);
       __ CallRuntime(Runtime::kAllocateInNewSpace);
       __ mr(r6, r3);
       __ Pop(r3, r5);
     }
     __ b(&done_allocate);
+
+    // Fall back to %NewRestParameter.
+    __ bind(&too_big_for_new_space);
+    __ push(r4);
+    __ TailCallRuntime(Runtime::kNewRestParameter);
   }
 }
 
@@ -5095,10 +5016,10 @@
   __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
   __ beq(&arguments_adaptor);
   {
-    __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+    __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
     __ LoadWordArith(
         r3,
-        FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
+        FieldMemOperand(r7, SharedFunctionInfo::kFormalParameterCountOffset));
 #if V8_TARGET_ARCH_PPC64
     __ SmiTag(r3);
 #endif
@@ -5118,6 +5039,7 @@
   // ----------- S t a t e -------------
   //  -- cp : context
   //  -- r3 : number of rest parameters (tagged)
+  //  -- r4 : function
   //  -- r5 : pointer just past first rest parameters
   //  -- r9 : size of rest parameters
   //  -- lr : return address
@@ -5125,9 +5047,10 @@
 
   // Allocate space for the strict arguments object plus the backing store.
   Label allocate, done_allocate;
-  __ mov(r4, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
-  __ add(r4, r4, r9);
-  __ Allocate(r4, r6, r7, r8, &allocate, NO_ALLOCATION_FLAGS);
+  __ mov(r10,
+         Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+  __ add(r10, r10, r9);
+  __ Allocate(r10, r6, r7, r8, &allocate, NO_ALLOCATION_FLAGS);
   __ bind(&done_allocate);
 
   // Setup the elements array in r6.
@@ -5160,47 +5083,27 @@
   __ addi(r3, r7, Operand(kHeapObjectTag));
   __ Ret();
 
-  // Fall back to %AllocateInNewSpace.
+  // Fall back to %AllocateInNewSpace (if not too big).
+  Label too_big_for_new_space;
   __ bind(&allocate);
+  __ Cmpi(r10, Operand(Page::kMaxRegularHeapObjectSize), r0);
+  __ bgt(&too_big_for_new_space);
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ SmiTag(r4);
-    __ Push(r3, r5, r4);
+    __ SmiTag(r10);
+    __ Push(r3, r5, r10);
     __ CallRuntime(Runtime::kAllocateInNewSpace);
     __ mr(r6, r3);
     __ Pop(r3, r5);
   }
   __ b(&done_allocate);
+
+  // Fall back to %NewStrictArguments.
+  __ bind(&too_big_for_new_space);
+  __ push(r4);
+  __ TailCallRuntime(Runtime::kNewStrictArguments);
 }
 
-void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
-  Register context = cp;
-  Register result = r3;
-  Register slot = r5;
-
-  // Go up the context chain to the script context.
-  for (int i = 0; i < depth(); ++i) {
-    __ LoadP(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
-    context = result;
-  }
-
-  // Load the PropertyCell value at the specified slot.
-  __ ShiftLeftImm(r0, slot, Operand(kPointerSizeLog2));
-  __ add(result, context, r0);
-  __ LoadP(result, ContextMemOperand(result));
-  __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
-
-  // If the result is not the_hole, return. Otherwise, handle in the runtime.
-  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
-  __ Ret(ne);
-
-  // Fallback to runtime.
-  __ SmiTag(slot);
-  __ Push(slot);
-  __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
-}
-
-
 void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
   Register value = r3;
   Register slot = r5;
diff --git a/src/ppc/codegen-ppc.cc b/src/ppc/codegen-ppc.cc
index 2139d87..8f2f1cd 100644
--- a/src/ppc/codegen-ppc.cc
+++ b/src/ppc/codegen-ppc.cc
@@ -16,62 +16,6 @@
 
 #define __ masm.
 
-
-#if defined(USE_SIMULATOR)
-byte* fast_exp_ppc_machine_code = nullptr;
-double fast_exp_simulator(double x, Isolate* isolate) {
-  return Simulator::current(isolate)
-      ->CallFPReturnsDouble(fast_exp_ppc_machine_code, x, 0);
-}
-#endif
-
-
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
-  size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
-  if (buffer == nullptr) return nullptr;
-  ExternalReference::InitializeMathExpData();
-
-  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
-                      CodeObjectRequired::kNo);
-
-  {
-    DoubleRegister input = d1;
-    DoubleRegister result = d2;
-    DoubleRegister double_scratch1 = d3;
-    DoubleRegister double_scratch2 = d4;
-    Register temp1 = r7;
-    Register temp2 = r8;
-    Register temp3 = r9;
-
-// Called from C
-    __ function_descriptor();
-
-    __ Push(temp3, temp2, temp1);
-    MathExpGenerator::EmitMathExp(&masm, input, result, double_scratch1,
-                                  double_scratch2, temp1, temp2, temp3);
-    __ Pop(temp3, temp2, temp1);
-    __ fmr(d1, result);
-    __ Ret();
-  }
-
-  CodeDesc desc;
-  masm.GetCode(&desc);
-  DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
-
-  Assembler::FlushICache(isolate, buffer, actual_size);
-  base::OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
-  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
-#else
-  fast_exp_ppc_machine_code = buffer;
-  return &fast_exp_simulator;
-#endif
-}
-
-
 UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
 #if defined(USE_SIMULATOR)
   return nullptr;
@@ -513,96 +457,6 @@
   __ bind(&done);
 }
 
-
-static MemOperand ExpConstant(int index, Register base) {
-  return MemOperand(base, index * kDoubleSize);
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm, DoubleRegister input,
-                                   DoubleRegister result,
-                                   DoubleRegister double_scratch1,
-                                   DoubleRegister double_scratch2,
-                                   Register temp1, Register temp2,
-                                   Register temp3) {
-  DCHECK(!input.is(result));
-  DCHECK(!input.is(double_scratch1));
-  DCHECK(!input.is(double_scratch2));
-  DCHECK(!result.is(double_scratch1));
-  DCHECK(!result.is(double_scratch2));
-  DCHECK(!double_scratch1.is(double_scratch2));
-  DCHECK(!temp1.is(temp2));
-  DCHECK(!temp1.is(temp3));
-  DCHECK(!temp2.is(temp3));
-  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
-  DCHECK(!masm->serializer_enabled());  // External references not serializable.
-
-  Label zero, infinity, done;
-
-  __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
-
-  __ lfd(double_scratch1, ExpConstant(0, temp3));
-  __ fcmpu(double_scratch1, input);
-  __ fmr(result, input);
-  __ bunordered(&done);
-  __ bge(&zero);
-
-  __ lfd(double_scratch2, ExpConstant(1, temp3));
-  __ fcmpu(input, double_scratch2);
-  __ bge(&infinity);
-
-  __ lfd(double_scratch1, ExpConstant(3, temp3));
-  __ lfd(result, ExpConstant(4, temp3));
-  __ fmul(double_scratch1, double_scratch1, input);
-  __ fadd(double_scratch1, double_scratch1, result);
-  __ MovDoubleLowToInt(temp2, double_scratch1);
-  __ fsub(double_scratch1, double_scratch1, result);
-  __ lfd(result, ExpConstant(6, temp3));
-  __ lfd(double_scratch2, ExpConstant(5, temp3));
-  __ fmul(double_scratch1, double_scratch1, double_scratch2);
-  __ fsub(double_scratch1, double_scratch1, input);
-  __ fsub(result, result, double_scratch1);
-  __ fmul(double_scratch2, double_scratch1, double_scratch1);
-  __ fmul(result, result, double_scratch2);
-  __ lfd(double_scratch2, ExpConstant(7, temp3));
-  __ fmul(result, result, double_scratch2);
-  __ fsub(result, result, double_scratch1);
-  __ lfd(double_scratch2, ExpConstant(8, temp3));
-  __ fadd(result, result, double_scratch2);
-  __ srwi(temp1, temp2, Operand(11));
-  __ andi(temp2, temp2, Operand(0x7ff));
-  __ addi(temp1, temp1, Operand(0x3ff));
-
-  // Must not call ExpConstant() after overwriting temp3!
-  __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
-  __ slwi(temp2, temp2, Operand(3));
-#if V8_TARGET_ARCH_PPC64
-  __ ldx(temp2, MemOperand(temp3, temp2));
-  __ sldi(temp1, temp1, Operand(52));
-  __ orx(temp2, temp1, temp2);
-  __ MovInt64ToDouble(double_scratch1, temp2);
-#else
-  __ add(ip, temp3, temp2);
-  __ lwz(temp3, MemOperand(ip, Register::kExponentOffset));
-  __ lwz(temp2, MemOperand(ip, Register::kMantissaOffset));
-  __ slwi(temp1, temp1, Operand(20));
-  __ orx(temp3, temp1, temp3);
-  __ MovInt64ToDouble(double_scratch1, temp3, temp2);
-#endif
-
-  __ fmul(result, result, double_scratch1);
-  __ b(&done);
-
-  __ bind(&zero);
-  __ fmr(result, kDoubleRegZero);
-  __ b(&done);
-
-  __ bind(&infinity);
-  __ lfd(result, ExpConstant(2, temp3));
-
-  __ bind(&done);
-}
-
 #undef __
 
 CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
diff --git a/src/ppc/codegen-ppc.h b/src/ppc/codegen-ppc.h
index c3cd9b3..ff487c3 100644
--- a/src/ppc/codegen-ppc.h
+++ b/src/ppc/codegen-ppc.h
@@ -23,18 +23,6 @@
  private:
   DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
 };
-
-class MathExpGenerator : public AllStatic {
- public:
-  // Register input isn't modified. All other registers are clobbered.
-  static void EmitMathExp(MacroAssembler* masm, DoubleRegister input,
-                          DoubleRegister result, DoubleRegister double_scratch1,
-                          DoubleRegister double_scratch2, Register temp1,
-                          Register temp2, Register temp3);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/ppc/deoptimizer-ppc.cc b/src/ppc/deoptimizer-ppc.cc
index ead877e..39102a1 100644
--- a/src/ppc/deoptimizer-ppc.cc
+++ b/src/ppc/deoptimizer-ppc.cc
@@ -124,8 +124,7 @@
 
   // Save all double registers before messing with them.
   __ subi(sp, sp, Operand(kDoubleRegsSize));
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
     int code = config->GetAllocatableDoubleCode(i);
     const DoubleRegister dreg = DoubleRegister::from_code(code);
diff --git a/src/ppc/disasm-ppc.cc b/src/ppc/disasm-ppc.cc
index baba146..c0a02a8 100644
--- a/src/ppc/disasm-ppc.cc
+++ b/src/ppc/disasm-ppc.cc
@@ -39,6 +39,7 @@
 namespace v8 {
 namespace internal {
 
+const auto GetRegConfig = RegisterConfiguration::Crankshaft;
 
 //------------------------------------------------------------------------------
 
@@ -118,7 +119,7 @@
 
 // Print the double FP register name according to the active name converter.
 void Decoder::PrintDRegister(int reg) {
-  Print(DoubleRegister::from_code(reg).ToString());
+  Print(GetRegConfig()->GetDoubleRegisterName(reg));
 }
 
 
@@ -1401,7 +1402,7 @@
 
 
 const char* NameConverter::NameOfAddress(byte* addr) const {
-  v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+  v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
   return tmp_buffer_.start();
 }
 
@@ -1412,7 +1413,7 @@
 
 
 const char* NameConverter::NameOfCPURegister(int reg) const {
-  return v8::internal::Register::from_code(reg).ToString();
+  return v8::internal::GetRegConfig()->GetGeneralRegisterName(reg);
 }
 
 const char* NameConverter::NameOfByteCPURegister(int reg) const {
@@ -1461,7 +1462,7 @@
     buffer[0] = '\0';
     byte* prev_pc = pc;
     pc += d.InstructionDecode(buffer, pc);
-    v8::internal::PrintF(f, "%p    %08x      %s\n", prev_pc,
+    v8::internal::PrintF(f, "%p    %08x      %s\n", static_cast<void*>(prev_pc),
                          *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
   }
 }
diff --git a/src/ppc/interface-descriptors-ppc.cc b/src/ppc/interface-descriptors-ppc.cc
index 6426316..21fe1ef 100644
--- a/src/ppc/interface-descriptors-ppc.cc
+++ b/src/ppc/interface-descriptors-ppc.cc
@@ -11,6 +11,14 @@
 
 const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
 
+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
+    CallInterfaceDescriptorData* data, int register_parameter_count) {
+  const Register default_stub_registers[] = {r3, r4, r5, r6, r7};
+  CHECK_LE(static_cast<size_t>(register_parameter_count),
+           arraysize(default_stub_registers));
+  data->InitializePlatformSpecific(register_parameter_count,
+                                   default_stub_registers);
+}
 
 const Register LoadDescriptor::ReceiverRegister() { return r4; }
 const Register LoadDescriptor::NameRegister() { return r5; }
@@ -39,9 +47,6 @@
 const Register StoreTransitionDescriptor::MapRegister() { return r6; }
 
 
-const Register LoadGlobalViaContextDescriptor::SlotRegister() { return r5; }
-
-
 const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r5; }
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r3; }
 
@@ -63,9 +68,6 @@
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
 
-const Register HasPropertyDescriptor::ObjectRegister() { return r3; }
-const Register HasPropertyDescriptor::KeyRegister() { return r6; }
-
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {r5};
@@ -251,18 +253,18 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
   // r3 -- number of arguments
   // r4 -- function
   // r5 -- allocation site with elements kind
-  Register registers[] = {r4, r5};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
+  Register registers[] = {r4, r5, r3};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
 
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // stack param count needs (constructor pointer, and single argument)
   Register registers[] = {r4, r5, r3};
@@ -270,24 +272,7 @@
 }
 
 
-void InternalArrayConstructorConstantArgCountDescriptor::
-    InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
-  // register state
-  // r3 -- number of arguments
-  // r4 -- constructor function
-  Register registers[] = {r4};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // stack param count needs (constructor pointer, and single argument)
-  Register registers[] = {r4, r3};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastArrayPushDescriptor::InitializePlatformSpecific(
+void VarArgFunctionDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // stack param count needs (arg count)
   Register registers[] = {r3};
diff --git a/src/ppc/macro-assembler-ppc.cc b/src/ppc/macro-assembler-ppc.cc
index 0f5f3a7..bda6541 100644
--- a/src/ppc/macro-assembler-ppc.cc
+++ b/src/ppc/macro-assembler-ppc.cc
@@ -84,10 +84,6 @@
   Label start;
   bind(&start);
 
-  // Statement positions are expected to be recorded when the target
-  // address is loaded.
-  positions_recorder()->WriteRecordedPositions();
-
   // branch via link register and set LK bit for return point
   mtctr(target);
   bctrl();
@@ -128,11 +124,6 @@
   Label start;
   bind(&start);
 #endif
-
-  // Statement positions are expected to be recorded when the target
-  // address is loaded.
-  positions_recorder()->WriteRecordedPositions();
-
   // This can likely be optimized to make use of bc() with 24bit relative
   //
   // RecordRelocInfo(x.rmode_, x.imm_);
@@ -725,8 +716,7 @@
 
 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
   // General purpose registers are pushed last on the stack.
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
   return MemOperand(sp, doubles_size + register_offset);
@@ -1047,9 +1037,8 @@
 
 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
   LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  LoadP(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
-  LoadP(vector,
-        FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+  LoadP(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
+  LoadP(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
 }
 
 
@@ -1387,12 +1376,14 @@
                                              const ParameterCount& expected,
                                              const ParameterCount& actual) {
   Label skip_flooding;
-  ExternalReference step_in_enabled =
-      ExternalReference::debug_step_in_enabled_address(isolate());
-  mov(r7, Operand(step_in_enabled));
-  lbz(r7, MemOperand(r7));
-  cmpi(r7, Operand::Zero());
-  beq(&skip_flooding);
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  mov(r7, Operand(last_step_action));
+  LoadByte(r7, MemOperand(r7), r0);
+  extsb(r7, r7);
+  cmpi(r7, Operand(StepIn));
+  blt(&skip_flooding);
   {
     FrameScope frame(this,
                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -4750,8 +4741,7 @@
   if (reg5.is_valid()) regs |= reg5.bit();
   if (reg6.is_valid()) regs |= reg6.bit();
 
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
     int code = config->GetAllocatableGeneralCode(i);
     Register candidate = Register::from_code(code);
diff --git a/src/ppc/simulator-ppc.cc b/src/ppc/simulator-ppc.cc
index 79dc825..1585845 100644
--- a/src/ppc/simulator-ppc.cc
+++ b/src/ppc/simulator-ppc.cc
@@ -23,6 +23,8 @@
 namespace v8 {
 namespace internal {
 
+const auto GetRegConfig = RegisterConfiguration::Crankshaft;
+
 // This macro provides a platform independent use of sscanf. The reason for
 // SScanF not being implemented in a platform independent way through
 // ::v8::internal::OS in the same way as SNPrintF is that the
@@ -315,7 +317,7 @@
             for (int i = 0; i < kNumRegisters; i++) {
               value = GetRegisterValue(i);
               PrintF("    %3s: %08" V8PRIxPTR,
-                     Register::from_code(i).ToString(), value);
+                     GetRegConfig()->GetGeneralRegisterName(i), value);
               if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
                   (i % 2) == 0) {
                 dvalue = GetRegisterPairDoubleValue(i);
@@ -334,7 +336,7 @@
             for (int i = 0; i < kNumRegisters; i++) {
               value = GetRegisterValue(i);
               PrintF("     %3s: %08" V8PRIxPTR " %11" V8PRIdPTR,
-                     Register::from_code(i).ToString(), value, value);
+                     GetRegConfig()->GetGeneralRegisterName(i), value, value);
               if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
                   (i % 2) == 0) {
                 dvalue = GetRegisterPairDoubleValue(i);
@@ -354,7 +356,7 @@
               dvalue = GetFPDoubleRegisterValue(i);
               uint64_t as_words = bit_cast<uint64_t>(dvalue);
               PrintF("%3s: %f 0x%08x %08x\n",
-                     DoubleRegister::from_code(i).ToString(), dvalue,
+                     GetRegConfig()->GetDoubleRegisterName(i), dvalue,
                      static_cast<uint32_t>(as_words >> 32),
                      static_cast<uint32_t>(as_words & 0xffffffff));
             }
@@ -707,7 +709,7 @@
 }
 
 
-void Simulator::FlushICache(v8::internal::HashMap* i_cache, void* start_addr,
+void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
                             size_t size) {
   intptr_t start = reinterpret_cast<intptr_t>(start_addr);
   int intra_line = (start & CachePage::kLineMask);
@@ -729,9 +731,8 @@
 }
 
 
-CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
-  v8::internal::HashMap::Entry* entry =
-      i_cache->LookupOrInsert(page, ICacheHash(page));
+CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
+  base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
   if (entry->value == NULL) {
     CachePage* new_page = new CachePage();
     entry->value = new_page;
@@ -741,8 +742,7 @@
 
 
 // Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
-                             int size) {
+void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
   DCHECK(size <= CachePage::kPageSize);
   DCHECK(AllOnOnePage(start, size - 1));
   DCHECK((start & CachePage::kLineMask) == 0);
@@ -754,9 +754,7 @@
   memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
 }
 
-
-void Simulator::CheckICache(v8::internal::HashMap* i_cache,
-                            Instruction* instr) {
+void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
   intptr_t address = reinterpret_cast<intptr_t>(instr);
   void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
   void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -789,7 +787,7 @@
 Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
   i_cache_ = isolate_->simulator_i_cache();
   if (i_cache_ == NULL) {
-    i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+    i_cache_ = new base::HashMap(&ICacheMatch);
     isolate_->set_simulator_i_cache(i_cache_);
   }
   Initialize(isolate);
@@ -925,10 +923,10 @@
 
 
 // static
-void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
   Redirection::DeleteChain(first);
   if (i_cache != nullptr) {
-    for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
+    for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
          entry = i_cache->Next(entry)) {
       delete static_cast<CachePage*>(entry->value);
     }
@@ -1284,15 +1282,18 @@
             case ExternalReference::BUILTIN_FP_FP_CALL:
             case ExternalReference::BUILTIN_COMPARE_CALL:
               PrintF("Call to host function at %p with args %f, %f",
-                     FUNCTION_ADDR(generic_target), dval0, dval1);
+                     static_cast<void*>(FUNCTION_ADDR(generic_target)),
+                     dval0, dval1);
               break;
             case ExternalReference::BUILTIN_FP_CALL:
               PrintF("Call to host function at %p with arg %f",
-                     FUNCTION_ADDR(generic_target), dval0);
+                     static_cast<void*>(FUNCTION_ADDR(generic_target)),
+                     dval0);
               break;
             case ExternalReference::BUILTIN_FP_INT_CALL:
               PrintF("Call to host function at %p with args %f, %" V8PRIdPTR,
-                     FUNCTION_ADDR(generic_target), dval0, ival);
+                     static_cast<void*>(FUNCTION_ADDR(generic_target)),
+                     dval0, ival);
               break;
             default:
               UNREACHABLE();
@@ -1434,8 +1435,8 @@
               "Call to host function at %p,\n"
               "\t\t\t\targs %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
               ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR,
-              FUNCTION_ADDR(target), arg[0], arg[1], arg[2], arg[3], arg[4],
-              arg[5]);
+              static_cast<void*>(FUNCTION_ADDR(target)), arg[0], arg[1],
+              arg[2], arg[3], arg[4], arg[5]);
           if (!stack_aligned) {
             PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
                    get_register(sp));
diff --git a/src/ppc/simulator-ppc.h b/src/ppc/simulator-ppc.h
index a3b03dc..d3163e8 100644
--- a/src/ppc/simulator-ppc.h
+++ b/src/ppc/simulator-ppc.h
@@ -66,7 +66,7 @@
 // Running with a simulator.
 
 #include "src/assembler.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
 #include "src/ppc/constants-ppc.h"
 
 namespace v8 {
@@ -217,7 +217,7 @@
   // Call on program start.
   static void Initialize(Isolate* isolate);
 
-  static void TearDown(HashMap* i_cache, Redirection* first);
+  static void TearDown(base::HashMap* i_cache, Redirection* first);
 
   // V8 generally calls into generated JS code with 5 parameters and into
   // generated RegExp code with 7 parameters. This is a convenience function,
@@ -239,8 +239,7 @@
   char* last_debugger_input() { return last_debugger_input_; }
 
   // ICache checking.
-  static void FlushICache(v8::internal::HashMap* i_cache, void* start,
-                          size_t size);
+  static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
 
   // Returns true if pc register contains one of the 'special_values' defined
   // below (bad_lr, end_sim_pc).
@@ -330,10 +329,9 @@
   void ExecuteInstruction(Instruction* instr);
 
   // ICache.
-  static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
-  static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
-                           int size);
-  static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+  static void CheckICache(base::HashMap* i_cache, Instruction* instr);
+  static void FlushOnePage(base::HashMap* i_cache, intptr_t start, int size);
+  static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
 
   // Runtime call support.
   static void* RedirectExternalReference(
@@ -371,7 +369,7 @@
   char* last_debugger_input_;
 
   // Icache simulation
-  v8::internal::HashMap* i_cache_;
+  base::HashMap* i_cache_;
 
   // Registered breakpoints.
   Instruction* break_pc_;
diff --git a/src/profiler/allocation-tracker.cc b/src/profiler/allocation-tracker.cc
index 6acd191..d094d0e 100644
--- a/src/profiler/allocation-tracker.cc
+++ b/src/profiler/allocation-tracker.cc
@@ -151,8 +151,8 @@
 void AddressToTraceMap::Print() {
   PrintF("[AddressToTraceMap (%" PRIuS "): \n", ranges_.size());
   for (RangeMap::iterator it = ranges_.begin(); it != ranges_.end(); ++it) {
-    PrintF("[%p - %p] => %u\n", it->second.start, it->first,
-        it->second.trace_node_id);
+    PrintF("[%p - %p] => %u\n", static_cast<void*>(it->second.start),
+           static_cast<void*>(it->first), it->second.trace_node_id);
   }
   PrintF("]\n");
 }
@@ -190,12 +190,10 @@
     delete *info;
 }
 
-
-AllocationTracker::AllocationTracker(
-    HeapObjectsMap* ids, StringsStorage* names)
+AllocationTracker::AllocationTracker(HeapObjectsMap* ids, StringsStorage* names)
     : ids_(ids),
       names_(names),
-      id_to_function_info_index_(HashMap::PointersMatch),
+      id_to_function_info_index_(base::HashMap::PointersMatch),
       info_index_for_other_state_(0) {
   FunctionInfo* info = new FunctionInfo();
   info->name = "(root)";
@@ -261,7 +259,7 @@
 
 unsigned AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
                                             SnapshotObjectId id) {
-  HashMap::Entry* entry = id_to_function_info_index_.LookupOrInsert(
+  base::HashMap::Entry* entry = id_to_function_info_index_.LookupOrInsert(
       reinterpret_cast<void*>(id), SnapshotObjectIdHash(id));
   if (entry->value == NULL) {
     FunctionInfo* info = new FunctionInfo();
diff --git a/src/profiler/allocation-tracker.h b/src/profiler/allocation-tracker.h
index dbcf4a7..45bd446 100644
--- a/src/profiler/allocation-tracker.h
+++ b/src/profiler/allocation-tracker.h
@@ -8,8 +8,8 @@
 #include <map>
 
 #include "include/v8-profiler.h"
+#include "src/base/hashmap.h"
 #include "src/handles.h"
-#include "src/hashmap.h"
 #include "src/list.h"
 #include "src/vector.h"
 
@@ -143,7 +143,7 @@
   AllocationTraceTree trace_tree_;
   unsigned allocation_trace_buffer_[kMaxAllocationTraceLength];
   List<FunctionInfo*> function_info_list_;
-  HashMap id_to_function_info_index_;
+  base::HashMap id_to_function_info_index_;
   List<UnresolvedLocation*> unresolved_locations_;
   unsigned info_index_for_other_state_;
   AddressToTraceMap address_to_trace_;
diff --git a/src/profiler/cpu-profiler.cc b/src/profiler/cpu-profiler.cc
index 5e4a444..42b5fdf 100644
--- a/src/profiler/cpu-profiler.cc
+++ b/src/profiler/cpu-profiler.cc
@@ -21,7 +21,7 @@
 
 
 ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator,
-                                                 Sampler* sampler,
+                                                 sampler::Sampler* sampler,
                                                  base::TimeDelta period)
     : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
       generator_(generator),
@@ -199,299 +199,23 @@
   }
 }
 
-
-void CpuProfiler::CallbackEvent(Name* name, Address entry_point) {
-  CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
-  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
-  rec->start = entry_point;
-  rec->entry = profiles_->NewCodeEntry(
-      Logger::CALLBACK_TAG,
-      profiles_->GetName(name));
-  rec->size = 1;
-  processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
-                                  AbstractCode* code, const char* name) {
-  CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
-  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
-  rec->start = code->address();
-  rec->entry = profiles_->NewCodeEntry(
-      tag, profiles_->GetFunctionName(name), CodeEntry::kEmptyNamePrefix,
-      CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
-      CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
-  RecordInliningInfo(rec->entry, code);
-  rec->size = code->ExecutableSize();
-  processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
-                                  AbstractCode* code, Name* name) {
-  CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
-  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
-  rec->start = code->address();
-  rec->entry = profiles_->NewCodeEntry(
-      tag, profiles_->GetFunctionName(name), CodeEntry::kEmptyNamePrefix,
-      CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
-      CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
-  RecordInliningInfo(rec->entry, code);
-  rec->size = code->ExecutableSize();
-  processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
-                                  AbstractCode* code,
-                                  SharedFunctionInfo* shared,
-                                  Name* script_name) {
-  CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
-  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
-  rec->start = code->address();
-  rec->entry = profiles_->NewCodeEntry(
-      tag, profiles_->GetFunctionName(shared->DebugName()),
-      CodeEntry::kEmptyNamePrefix,
-      profiles_->GetName(InferScriptName(script_name, shared)),
-      CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
-      NULL, code->instruction_start());
-  RecordInliningInfo(rec->entry, code);
-  rec->entry->FillFunctionInfo(shared);
-  rec->size = code->ExecutableSize();
-  processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
-                                  AbstractCode* abstract_code,
-                                  SharedFunctionInfo* shared, Name* script_name,
-                                  int line, int column) {
-  CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
-  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
-  rec->start = abstract_code->address();
-  Script* script = Script::cast(shared->script());
-  JITLineInfoTable* line_table = NULL;
-  if (script) {
-    if (abstract_code->IsCode()) {
-      Code* code = abstract_code->GetCode();
-      int start_position = shared->start_position();
-      int end_position = shared->end_position();
-      line_table = new JITLineInfoTable();
-      for (RelocIterator it(code); !it.done(); it.next()) {
-        RelocInfo* reloc_info = it.rinfo();
-        if (!RelocInfo::IsPosition(reloc_info->rmode())) continue;
-        int position = static_cast<int>(reloc_info->data());
-        // TODO(alph): in case of inlining the position may correspond
-        // to an inlined function source code. Do not collect positions
-        // that fall beyond the function source code. There's however a
-        // chance the inlined function has similar positions but in another
-        // script. So the proper fix is to store script_id in some form
-        // along with the inlined function positions.
-        if (position < start_position || position >= end_position) continue;
-        int pc_offset = static_cast<int>(reloc_info->pc() - code->address());
-        int line_number = script->GetLineNumber(position) + 1;
-        line_table->SetPosition(pc_offset, line_number);
-      }
-    } else {
-      BytecodeArray* bytecode = abstract_code->GetBytecodeArray();
-      line_table = new JITLineInfoTable();
-      interpreter::SourcePositionTableIterator it(
-          bytecode->source_position_table());
-      for (; !it.done(); it.Advance()) {
-        int line_number = script->GetLineNumber(it.source_position()) + 1;
-        int pc_offset = it.bytecode_offset() + BytecodeArray::kHeaderSize;
-        line_table->SetPosition(pc_offset, line_number);
-      }
+void CpuProfiler::CodeEventHandler(const CodeEventsContainer& evt_rec) {
+  switch (evt_rec.generic.type) {
+    case CodeEventRecord::CODE_CREATION:
+    case CodeEventRecord::CODE_MOVE:
+    case CodeEventRecord::CODE_DISABLE_OPT:
+      processor_->Enqueue(evt_rec);
+      break;
+    case CodeEventRecord::CODE_DEOPT: {
+      const CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
+      Address pc = reinterpret_cast<Address>(rec->pc);
+      int fp_to_sp_delta = rec->fp_to_sp_delta;
+      processor_->Enqueue(evt_rec);
+      processor_->AddDeoptStack(isolate_, pc, fp_to_sp_delta);
+      break;
     }
-  }
-  rec->entry = profiles_->NewCodeEntry(
-      tag, profiles_->GetFunctionName(shared->DebugName()),
-      CodeEntry::kEmptyNamePrefix,
-      profiles_->GetName(InferScriptName(script_name, shared)), line, column,
-      line_table, abstract_code->instruction_start());
-  RecordInliningInfo(rec->entry, abstract_code);
-  RecordDeoptInlinedFrames(rec->entry, abstract_code);
-  rec->entry->FillFunctionInfo(shared);
-  rec->size = abstract_code->ExecutableSize();
-  processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
-                                  AbstractCode* code, int args_count) {
-  CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
-  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
-  rec->start = code->address();
-  rec->entry = profiles_->NewCodeEntry(
-      tag, profiles_->GetName(args_count), "args_count: ",
-      CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
-      CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
-  RecordInliningInfo(rec->entry, code);
-  rec->size = code->ExecutableSize();
-  processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeMoveEvent(AbstractCode* from, Address to) {
-  CodeEventsContainer evt_rec(CodeEventRecord::CODE_MOVE);
-  CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
-  rec->from = from->address();
-  rec->to = to;
-  processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeDisableOptEvent(AbstractCode* code,
-                                      SharedFunctionInfo* shared) {
-  CodeEventsContainer evt_rec(CodeEventRecord::CODE_DISABLE_OPT);
-  CodeDisableOptEventRecord* rec = &evt_rec.CodeDisableOptEventRecord_;
-  rec->start = code->address();
-  rec->bailout_reason = GetBailoutReason(shared->disable_optimization_reason());
-  processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta) {
-  CodeEventsContainer evt_rec(CodeEventRecord::CODE_DEOPT);
-  CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
-  Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, pc);
-  rec->start = code->address();
-  rec->deopt_reason = Deoptimizer::GetDeoptReason(info.deopt_reason);
-  rec->position = info.position;
-  rec->deopt_id = info.deopt_id;
-  processor_->Enqueue(evt_rec);
-  processor_->AddDeoptStack(isolate_, pc, fp_to_sp_delta);
-}
-
-void CpuProfiler::GetterCallbackEvent(Name* name, Address entry_point) {
-  CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
-  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
-  rec->start = entry_point;
-  rec->entry = profiles_->NewCodeEntry(
-      Logger::CALLBACK_TAG,
-      profiles_->GetName(name),
-      "get ");
-  rec->size = 1;
-  processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::RegExpCodeCreateEvent(AbstractCode* code, String* source) {
-  CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
-  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
-  rec->start = code->address();
-  rec->entry = profiles_->NewCodeEntry(
-      Logger::REG_EXP_TAG, profiles_->GetName(source), "RegExp: ",
-      CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
-      CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
-  rec->size = code->ExecutableSize();
-  processor_->Enqueue(evt_rec);
-}
-
-
-void CpuProfiler::SetterCallbackEvent(Name* name, Address entry_point) {
-  CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
-  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
-  rec->start = entry_point;
-  rec->entry = profiles_->NewCodeEntry(
-      Logger::CALLBACK_TAG,
-      profiles_->GetName(name),
-      "set ");
-  rec->size = 1;
-  processor_->Enqueue(evt_rec);
-}
-
-Name* CpuProfiler::InferScriptName(Name* name, SharedFunctionInfo* info) {
-  if (name->IsString() && String::cast(name)->length()) return name;
-  if (!info->script()->IsScript()) return name;
-  Object* source_url = Script::cast(info->script())->source_url();
-  return source_url->IsName() ? Name::cast(source_url) : name;
-}
-
-void CpuProfiler::RecordInliningInfo(CodeEntry* entry,
-                                     AbstractCode* abstract_code) {
-  if (!abstract_code->IsCode()) return;
-  Code* code = abstract_code->GetCode();
-  if (code->kind() != Code::OPTIMIZED_FUNCTION) return;
-  DeoptimizationInputData* deopt_input_data =
-      DeoptimizationInputData::cast(code->deoptimization_data());
-  int deopt_count = deopt_input_data->DeoptCount();
-  for (int i = 0; i < deopt_count; i++) {
-    int pc_offset = deopt_input_data->Pc(i)->value();
-    if (pc_offset == -1) continue;
-    int translation_index = deopt_input_data->TranslationIndex(i)->value();
-    TranslationIterator it(deopt_input_data->TranslationByteArray(),
-                           translation_index);
-    Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
-    DCHECK_EQ(Translation::BEGIN, opcode);
-    it.Skip(Translation::NumberOfOperandsFor(opcode));
-    int depth = 0;
-    std::vector<CodeEntry*> inline_stack;
-    while (it.HasNext() &&
-           Translation::BEGIN !=
-               (opcode = static_cast<Translation::Opcode>(it.Next()))) {
-      if (opcode != Translation::JS_FRAME &&
-          opcode != Translation::INTERPRETED_FRAME) {
-        it.Skip(Translation::NumberOfOperandsFor(opcode));
-        continue;
-      }
-      it.Next();  // Skip ast_id
-      int shared_info_id = it.Next();
-      it.Next();  // Skip height
-      SharedFunctionInfo* shared_info = SharedFunctionInfo::cast(
-          deopt_input_data->LiteralArray()->get(shared_info_id));
-      if (!depth++) continue;  // Skip the current function itself.
-      CodeEntry* inline_entry = new CodeEntry(
-          entry->tag(), profiles_->GetFunctionName(shared_info->DebugName()),
-          CodeEntry::kEmptyNamePrefix, entry->resource_name(),
-          CpuProfileNode::kNoLineNumberInfo,
-          CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
-      inline_entry->FillFunctionInfo(shared_info);
-      inline_stack.push_back(inline_entry);
-    }
-    if (!inline_stack.empty()) {
-      entry->AddInlineStack(pc_offset, inline_stack);
-      DCHECK(inline_stack.empty());
-    }
-  }
-}
-
-void CpuProfiler::RecordDeoptInlinedFrames(CodeEntry* entry,
-                                           AbstractCode* abstract_code) {
-  if (abstract_code->kind() != AbstractCode::OPTIMIZED_FUNCTION) return;
-  Code* code = abstract_code->GetCode();
-  DeoptimizationInputData* deopt_input_data =
-      DeoptimizationInputData::cast(code->deoptimization_data());
-  int const mask = RelocInfo::ModeMask(RelocInfo::DEOPT_ID);
-  for (RelocIterator rit(code, mask); !rit.done(); rit.next()) {
-    RelocInfo* reloc_info = rit.rinfo();
-    DCHECK(RelocInfo::IsDeoptId(reloc_info->rmode()));
-    int deopt_id = static_cast<int>(reloc_info->data());
-    int translation_index =
-        deopt_input_data->TranslationIndex(deopt_id)->value();
-    TranslationIterator it(deopt_input_data->TranslationByteArray(),
-                           translation_index);
-    Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
-    DCHECK_EQ(Translation::BEGIN, opcode);
-    it.Skip(Translation::NumberOfOperandsFor(opcode));
-    std::vector<CodeEntry::DeoptInlinedFrame> inlined_frames;
-    while (it.HasNext() &&
-           Translation::BEGIN !=
-               (opcode = static_cast<Translation::Opcode>(it.Next()))) {
-      if (opcode != Translation::JS_FRAME &&
-          opcode != Translation::INTERPRETED_FRAME) {
-        it.Skip(Translation::NumberOfOperandsFor(opcode));
-        continue;
-      }
-      BailoutId ast_id = BailoutId(it.Next());
-      int shared_info_id = it.Next();
-      it.Next();  // Skip height
-      SharedFunctionInfo* shared = SharedFunctionInfo::cast(
-          deopt_input_data->LiteralArray()->get(shared_info_id));
-      int source_position = Deoptimizer::ComputeSourcePosition(shared, ast_id);
-      int script_id = v8::UnboundScript::kNoScriptId;
-      if (shared->script()->IsScript()) {
-        Script* script = Script::cast(shared->script());
-        script_id = script->id();
-      }
-      CodeEntry::DeoptInlinedFrame frame = {source_position, script_id};
-      inlined_frames.push_back(frame);
-    }
-    if (!inlined_frames.empty() && !entry->HasDeoptInlinedFramesFor(deopt_id)) {
-      entry->AddDeoptInlinedFrames(deopt_id, inlined_frames);
-      DCHECK(inlined_frames.empty());
-    }
+    default:
+      UNREACHABLE();
   }
 }
 
@@ -499,15 +223,12 @@
     : isolate_(isolate),
       sampling_interval_(base::TimeDelta::FromMicroseconds(
           FLAG_cpu_profiler_sampling_interval)),
-      profiles_(new CpuProfilesCollection(isolate->heap())),
-      generator_(NULL),
-      processor_(NULL),
+      profiles_(new CpuProfilesCollection(isolate)),
       is_profiling_(false) {
+  profiles_->set_cpu_profiler(this);
 }
 
-
-CpuProfiler::CpuProfiler(Isolate* isolate,
-                         CpuProfilesCollection* test_profiles,
+CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilesCollection* test_profiles,
                          ProfileGenerator* test_generator,
                          ProfilerEventsProcessor* test_processor)
     : isolate_(isolate),
@@ -517,28 +238,25 @@
       generator_(test_generator),
       processor_(test_processor),
       is_profiling_(false) {
+  profiles_->set_cpu_profiler(this);
 }
 
-
 CpuProfiler::~CpuProfiler() {
   DCHECK(!is_profiling_);
-  delete profiles_;
 }
 
-
 void CpuProfiler::set_sampling_interval(base::TimeDelta value) {
   DCHECK(!is_profiling_);
   sampling_interval_ = value;
 }
 
-
 void CpuProfiler::ResetProfiles() {
-  delete profiles_;
-  profiles_ = new CpuProfilesCollection(isolate()->heap());
+  profiles_.reset(new CpuProfilesCollection(isolate_));
+  profiles_->set_cpu_profiler(this);
 }
 
 void CpuProfiler::CollectSample() {
-  if (processor_ != NULL) {
+  if (processor_) {
     processor_->AddCurrentStack(isolate_);
   }
 }
@@ -557,7 +275,7 @@
 
 
 void CpuProfiler::StartProcessorIfNotStarted() {
-  if (processor_ != NULL) {
+  if (processor_) {
     processor_->AddCurrentStack(isolate_);
     return;
   }
@@ -565,11 +283,15 @@
   // Disable logging when using the new implementation.
   saved_is_logging_ = logger->is_logging_;
   logger->is_logging_ = false;
-  generator_ = new ProfileGenerator(profiles_);
-  Sampler* sampler = logger->sampler();
-  processor_ = new ProfilerEventsProcessor(
-      generator_, sampler, sampling_interval_);
+  sampler::Sampler* sampler = logger->sampler();
+  generator_.reset(new ProfileGenerator(profiles_.get()));
+  processor_.reset(new ProfilerEventsProcessor(generator_.get(), sampler,
+                                               sampling_interval_));
+  logger->SetUpProfilerListener();
+  ProfilerListener* profiler_listener = logger->profiler_listener();
+  profiler_listener->AddObserver(this);
   is_profiling_ = true;
+  isolate_->set_is_profiling(true);
   // Enumerate stuff we already have in the heap.
   DCHECK(isolate_->heap()->HasBeenSetUp());
   if (!FLAG_prof_browser_mode) {
@@ -587,10 +309,10 @@
 
 
 CpuProfile* CpuProfiler::StopProfiling(const char* title) {
-  if (!is_profiling_) return NULL;
+  if (!is_profiling_) return nullptr;
   StopProcessorIfLastProfile(title);
   CpuProfile* result = profiles_->StopProfiling(title);
-  if (result != NULL) {
+  if (result) {
     result->Print();
   }
   return result;
@@ -598,7 +320,7 @@
 
 
 CpuProfile* CpuProfiler::StopProfiling(String* title) {
-  if (!is_profiling_) return NULL;
+  if (!is_profiling_) return nullptr;
   const char* profile_title = profiles_->GetName(title);
   StopProcessorIfLastProfile(profile_title);
   return profiles_->StopProfiling(profile_title);
@@ -606,19 +328,24 @@
 
 
 void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
-  if (profiles_->IsLastProfile(title)) StopProcessor();
+  if (profiles_->IsLastProfile(title)) {
+    StopProcessor();
+  }
 }
 
 
 void CpuProfiler::StopProcessor() {
   Logger* logger = isolate_->logger();
-  Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
+  sampler::Sampler* sampler =
+      reinterpret_cast<sampler::Sampler*>(logger->ticker_);
   is_profiling_ = false;
+  isolate_->set_is_profiling(false);
+  ProfilerListener* profiler_listener = logger->profiler_listener();
+  profiler_listener->RemoveObserver(this);
   processor_->StopSynchronously();
-  delete processor_;
-  delete generator_;
-  processor_ = NULL;
-  generator_ = NULL;
+  logger->TearDownProfilerListener();
+  processor_.reset();
+  generator_.reset();
   sampler->SetHasProcessingThread(false);
   sampler->DecreaseProfilingDepth();
   logger->is_logging_ = saved_is_logging_;
@@ -638,6 +365,5 @@
   }
 }
 
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/profiler/cpu-profiler.h b/src/profiler/cpu-profiler.h
index ed1e15f..d354aeb 100644
--- a/src/profiler/cpu-profiler.h
+++ b/src/profiler/cpu-profiler.h
@@ -5,14 +5,18 @@
 #ifndef V8_PROFILER_CPU_PROFILER_H_
 #define V8_PROFILER_CPU_PROFILER_H_
 
+#include <memory>
+
 #include "src/allocation.h"
 #include "src/base/atomic-utils.h"
 #include "src/base/atomicops.h"
 #include "src/base/platform/time.h"
 #include "src/compiler.h"
+#include "src/isolate.h"
+#include "src/libsampler/v8-sampler.h"
 #include "src/locked-queue.h"
 #include "src/profiler/circular-queue.h"
-#include "src/profiler/sampler.h"
+#include "src/profiler/profiler-listener.h"
 #include "src/profiler/tick-sample.h"
 
 namespace v8 {
@@ -82,6 +86,8 @@
   const char* deopt_reason;
   SourcePosition position;
   int deopt_id;
+  void* pc;
+  int fp_to_sp_delta;
 
   INLINE(void UpdateCodeMap(CodeMap* code_map));
 };
@@ -128,7 +134,7 @@
 class ProfilerEventsProcessor : public base::Thread {
  public:
   ProfilerEventsProcessor(ProfileGenerator* generator,
-                          Sampler* sampler,
+                          sampler::Sampler* sampler,
                           base::TimeDelta period);
   virtual ~ProfilerEventsProcessor();
 
@@ -166,7 +172,7 @@
   SampleProcessingResult ProcessOneSample();
 
   ProfileGenerator* generator_;
-  Sampler* sampler_;
+  sampler::Sampler* sampler_;
   base::Atomic32 running_;
   const base::TimeDelta period_;  // Samples & code events processing period.
   LockedQueue<CodeEventsContainer> events_buffer_;
@@ -180,24 +186,11 @@
   unsigned last_processed_code_event_id_;
 };
 
-
-#define PROFILE(IsolateGetter, Call)                                        \
-  do {                                                                      \
-    Isolate* cpu_profiler_isolate = (IsolateGetter);                        \
-    v8::internal::Logger* logger = cpu_profiler_isolate->logger();          \
-    CpuProfiler* cpu_profiler = cpu_profiler_isolate->cpu_profiler();       \
-    if (logger->is_logging_code_events() || cpu_profiler->is_profiling()) { \
-      logger->Call;                                                         \
-    }                                                                       \
-  } while (false)
-
-
-class CpuProfiler : public CodeEventListener {
+class CpuProfiler : public CodeEventObserver {
  public:
   explicit CpuProfiler(Isolate* isolate);
 
-  CpuProfiler(Isolate* isolate,
-              CpuProfilesCollection* test_collection,
+  CpuProfiler(Isolate* isolate, CpuProfilesCollection* profiles,
               ProfileGenerator* test_generator,
               ProfilerEventsProcessor* test_processor);
 
@@ -214,41 +207,16 @@
   void DeleteAllProfiles();
   void DeleteProfile(CpuProfile* profile);
 
+  void CodeEventHandler(const CodeEventsContainer& evt_rec) override;
+
   // Invoked from stack sampler (thread or signal handler.)
   inline TickSample* StartTickSample();
   inline void FinishTickSample();
 
-  // Must be called via PROFILE macro, otherwise will crash when
-  // profiling is not enabled.
-  void CallbackEvent(Name* name, Address entry_point) override;
-  void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
-                       const char* comment) override;
-  void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
-                       Name* name) override;
-  void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
-                       SharedFunctionInfo* shared, Name* script_name) override;
-  void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
-                       SharedFunctionInfo* shared, Name* script_name, int line,
-                       int column) override;
-  void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
-                       int args_count) override;
-  void CodeMovingGCEvent() override {}
-  void CodeMoveEvent(AbstractCode* from, Address to) override;
-  void CodeDisableOptEvent(AbstractCode* code,
-                           SharedFunctionInfo* shared) override;
-  void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta);
-  void GetterCallbackEvent(Name* name, Address entry_point) override;
-  void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
-  void SetterCallbackEvent(Name* name, Address entry_point) override;
-  void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
+  bool is_profiling() const { return is_profiling_; }
 
-  INLINE(bool is_profiling() const) { return is_profiling_; }
-  bool* is_profiling_address() {
-    return &is_profiling_;
-  }
-
-  ProfileGenerator* generator() const { return generator_; }
-  ProfilerEventsProcessor* processor() const { return processor_; }
+  ProfileGenerator* generator() const { return generator_.get(); }
+  ProfilerEventsProcessor* processor() const { return processor_.get(); }
   Isolate* isolate() const { return isolate_; }
 
  private:
@@ -257,15 +225,12 @@
   void StopProcessor();
   void ResetProfiles();
   void LogBuiltins();
-  void RecordInliningInfo(CodeEntry* entry, AbstractCode* abstract_code);
-  void RecordDeoptInlinedFrames(CodeEntry* entry, AbstractCode* abstract_code);
-  Name* InferScriptName(Name* name, SharedFunctionInfo* info);
 
-  Isolate* isolate_;
+  Isolate* const isolate_;
   base::TimeDelta sampling_interval_;
-  CpuProfilesCollection* profiles_;
-  ProfileGenerator* generator_;
-  ProfilerEventsProcessor* processor_;
+  std::unique_ptr<CpuProfilesCollection> profiles_;
+  std::unique_ptr<ProfileGenerator> generator_;
+  std::unique_ptr<ProfilerEventsProcessor> processor_;
   bool saved_is_logging_;
   bool is_profiling_;
 
diff --git a/src/profiler/heap-snapshot-generator.cc b/src/profiler/heap-snapshot-generator.cc
index e67acef..c80877f 100644
--- a/src/profiler/heap-snapshot-generator.cc
+++ b/src/profiler/heap-snapshot-generator.cc
@@ -80,8 +80,8 @@
 void HeapEntry::Print(
     const char* prefix, const char* edge_name, int max_depth, int indent) {
   STATIC_ASSERT(sizeof(unsigned) == sizeof(id()));
-  base::OS::Print("%6" PRIuS " @%6u %*c %s%s: ", self_size(), id(), indent,
-                  ' ', prefix, edge_name);
+  base::OS::Print("%6" PRIuS " @%6u %*c %s%s: ", self_size(), id(), indent, ' ',
+                  prefix, edge_name);
   if (type() != kString) {
     base::OS::Print("%s %.40s\n", TypeAsString(), name_);
   } else {
@@ -392,7 +392,7 @@
       entries_.at(to_entry_info_index).addr = NULL;
     }
   } else {
-    HashMap::Entry* to_entry =
+    base::HashMap::Entry* to_entry =
         entries_map_.LookupOrInsert(to, ComputePointerHash(to));
     if (to_entry->value != NULL) {
       // We found the existing entry with to address for an old object.
@@ -412,10 +412,8 @@
     // object is migrated.
     if (FLAG_heap_profiler_trace_objects) {
       PrintF("Move object from %p to %p old size %6d new size %6d\n",
-             from,
-             to,
-             entries_.at(from_entry_info_index).size,
-             object_size);
+             static_cast<void*>(from), static_cast<void*>(to),
+             entries_.at(from_entry_info_index).size, object_size);
     }
     entries_.at(from_entry_info_index).size = object_size;
     to_entry->value = from_value;
@@ -430,7 +428,8 @@
 
 
 SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
-  HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr));
+  base::HashMap::Entry* entry =
+      entries_map_.Lookup(addr, ComputePointerHash(addr));
   if (entry == NULL) return 0;
   int entry_index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
   EntryInfo& entry_info = entries_.at(entry_index);
@@ -443,7 +442,7 @@
                                                 unsigned int size,
                                                 bool accessed) {
   DCHECK(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
-  HashMap::Entry* entry =
+  base::HashMap::Entry* entry =
       entries_map_.LookupOrInsert(addr, ComputePointerHash(addr));
   if (entry->value != NULL) {
     int entry_index =
@@ -452,9 +451,7 @@
     entry_info.accessed = accessed;
     if (FLAG_heap_profiler_trace_objects) {
       PrintF("Update object size : %p with old size %d and new size %d\n",
-             addr,
-             entry_info.size,
-             size);
+             static_cast<void*>(addr), entry_info.size, size);
     }
     entry_info.size = size;
     return entry_info.id;
@@ -487,9 +484,8 @@
     FindOrAddEntry(obj->address(), obj->Size());
     if (FLAG_heap_profiler_trace_objects) {
       PrintF("Update object      : %p %6d. Next address is %p\n",
-             obj->address(),
-             obj->Size(),
-             obj->address() + obj->Size());
+             static_cast<void*>(obj->address()), obj->Size(),
+             static_cast<void*>(obj->address() + obj->Size()));
     }
   }
   RemoveDeadEntries();
@@ -517,20 +513,16 @@
   void Print() const {
     if (expected_size == 0) {
       PrintF("Untracked object   : %p %6d. Next address is %p\n",
-             obj->address(),
-             obj->Size(),
-             obj->address() + obj->Size());
+             static_cast<void*>(obj->address()), obj->Size(),
+             static_cast<void*>(obj->address() + obj->Size()));
     } else if (obj->Size() != expected_size) {
-      PrintF("Wrong size %6d: %p %6d. Next address is %p\n",
-             expected_size,
-             obj->address(),
-             obj->Size(),
-             obj->address() + obj->Size());
+      PrintF("Wrong size %6d: %p %6d. Next address is %p\n", expected_size,
+             static_cast<void*>(obj->address()), obj->Size(),
+             static_cast<void*>(obj->address() + obj->Size()));
     } else {
       PrintF("Good object      : %p %6d. Next address is %p\n",
-             obj->address(),
-             expected_size,
-             obj->address() + obj->Size());
+             static_cast<void*>(obj->address()), expected_size,
+             static_cast<void*>(obj->address() + obj->Size()));
     }
   }
 };
@@ -554,7 +546,7 @@
   for (HeapObject* obj = iterator.next();
        obj != NULL;
        obj = iterator.next()) {
-    HashMap::Entry* entry =
+    base::HashMap::Entry* entry =
         entries_map_.Lookup(obj->address(), ComputePointerHash(obj->address()));
     if (entry == NULL) {
       ++untracked;
@@ -674,7 +666,7 @@
         entries_.at(first_free_entry) = entry_info;
       }
       entries_.at(first_free_entry).accessed = false;
-      HashMap::Entry* entry = entries_map_.Lookup(
+      base::HashMap::Entry* entry = entries_map_.Lookup(
           entry_info.addr, ComputePointerHash(entry_info.addr));
       DCHECK(entry);
       entry->value = reinterpret_cast<void*>(first_free_entry);
@@ -707,37 +699,28 @@
 
 
 size_t HeapObjectsMap::GetUsedMemorySize() const {
-  return
-      sizeof(*this) +
-      sizeof(HashMap::Entry) * entries_map_.capacity() +
-      GetMemoryUsedByList(entries_) +
-      GetMemoryUsedByList(time_intervals_);
+  return sizeof(*this) +
+         sizeof(base::HashMap::Entry) * entries_map_.capacity() +
+         GetMemoryUsedByList(entries_) + GetMemoryUsedByList(time_intervals_);
 }
 
-
-HeapEntriesMap::HeapEntriesMap()
-    : entries_(HashMap::PointersMatch) {
-}
-
+HeapEntriesMap::HeapEntriesMap() : entries_(base::HashMap::PointersMatch) {}
 
 int HeapEntriesMap::Map(HeapThing thing) {
-  HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing));
+  base::HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing));
   if (cache_entry == NULL) return HeapEntry::kNoEntry;
   return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
 }
 
 
 void HeapEntriesMap::Pair(HeapThing thing, int entry) {
-  HashMap::Entry* cache_entry = entries_.LookupOrInsert(thing, Hash(thing));
+  base::HashMap::Entry* cache_entry =
+      entries_.LookupOrInsert(thing, Hash(thing));
   DCHECK(cache_entry->value == NULL);
   cache_entry->value = reinterpret_cast<void*>(static_cast<intptr_t>(entry));
 }
 
-
-HeapObjectsSet::HeapObjectsSet()
-    : entries_(HashMap::PointersMatch) {
-}
-
+HeapObjectsSet::HeapObjectsSet() : entries_(base::HashMap::PointersMatch) {}
 
 void HeapObjectsSet::Clear() {
   entries_.Clear();
@@ -760,7 +743,7 @@
 
 const char* HeapObjectsSet::GetTag(Object* obj) {
   HeapObject* object = HeapObject::cast(obj);
-  HashMap::Entry* cache_entry =
+  base::HashMap::Entry* cache_entry =
       entries_.Lookup(object, HeapEntriesMap::Hash(object));
   return cache_entry != NULL
       ? reinterpret_cast<const char*>(cache_entry->value)
@@ -771,7 +754,7 @@
 void HeapObjectsSet::SetTag(Object* obj, const char* tag) {
   if (!obj->IsHeapObject()) return;
   HeapObject* object = HeapObject::cast(obj);
-  HashMap::Entry* cache_entry =
+  base::HashMap::Entry* cache_entry =
       entries_.LookupOrInsert(object, HeapEntriesMap::Hash(object));
   cache_entry->value = const_cast<char*>(tag);
 }
@@ -1117,7 +1100,7 @@
   } else if (obj->IsJSFunction()) {
     JSFunction* js_fun = JSFunction::cast(js_obj);
     Object* proto_or_map = js_fun->prototype_or_initial_map();
-    if (!proto_or_map->IsTheHole()) {
+    if (!proto_or_map->IsTheHole(heap_->isolate())) {
       if (!proto_or_map->IsMap()) {
         SetPropertyReference(
             obj, entry,
@@ -1387,9 +1370,9 @@
   SetInternalReference(obj, entry,
                        "optimized_code_map", shared->optimized_code_map(),
                        SharedFunctionInfo::kOptimizedCodeMapOffset);
-  SetInternalReference(obj, entry,
-                       "feedback_vector", shared->feedback_vector(),
-                       SharedFunctionInfo::kFeedbackVectorOffset);
+  SetInternalReference(obj, entry, "feedback_metadata",
+                       shared->feedback_metadata(),
+                       SharedFunctionInfo::kFeedbackMetadataOffset);
 }
 
 
@@ -1567,6 +1550,7 @@
 
 
 void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
+  Isolate* isolate = js_obj->GetIsolate();
   if (js_obj->HasFastProperties()) {
     DescriptorArray* descs = js_obj->map()->instance_descriptors();
     int real_size = js_obj->map()->NumberOfOwnDescriptors();
@@ -1600,7 +1584,7 @@
     int length = dictionary->Capacity();
     for (int i = 0; i < length; ++i) {
       Object* k = dictionary->KeyAt(i);
-      if (dictionary->IsKey(k)) {
+      if (dictionary->IsKey(isolate, k)) {
         DCHECK(dictionary->ValueAt(i)->IsPropertyCell());
         PropertyCell* cell = PropertyCell::cast(dictionary->ValueAt(i));
         Object* value = cell->value();
@@ -1614,7 +1598,7 @@
     int length = dictionary->Capacity();
     for (int i = 0; i < length; ++i) {
       Object* k = dictionary->KeyAt(i);
-      if (dictionary->IsKey(k)) {
+      if (dictionary->IsKey(isolate, k)) {
         Object* value = dictionary->ValueAt(i);
         PropertyDetails details = dictionary->DetailsAt(i);
         SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry,
@@ -1644,13 +1628,14 @@
 
 
 void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
+  Isolate* isolate = js_obj->GetIsolate();
   if (js_obj->HasFastObjectElements()) {
     FixedArray* elements = FixedArray::cast(js_obj->elements());
     int length = js_obj->IsJSArray() ?
         Smi::cast(JSArray::cast(js_obj)->length())->value() :
         elements->length();
     for (int i = 0; i < length; ++i) {
-      if (!elements->get(i)->IsTheHole()) {
+      if (!elements->get(i)->IsTheHole(isolate)) {
         SetElementReference(js_obj, entry, i, elements->get(i));
       }
     }
@@ -1659,7 +1644,7 @@
     int length = dictionary->Capacity();
     for (int i = 0; i < length; ++i) {
       Object* k = dictionary->KeyAt(i);
-      if (dictionary->IsKey(k)) {
+      if (dictionary->IsKey(isolate, k)) {
         DCHECK(k->IsNumber());
         uint32_t index = static_cast<uint32_t>(k->Number());
         SetElementReference(js_obj, entry, index, dictionary->ValueAt(i));
@@ -2261,8 +2246,7 @@
 
 
 NativeObjectsExplorer::~NativeObjectsExplorer() {
-  for (HashMap::Entry* p = objects_by_info_.Start();
-       p != NULL;
+  for (base::HashMap::Entry* p = objects_by_info_.Start(); p != NULL;
        p = objects_by_info_.Next(p)) {
     v8::RetainedObjectInfo* info =
         reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
@@ -2271,8 +2255,7 @@
         reinterpret_cast<List<HeapObject*>* >(p->value);
     delete objects;
   }
-  for (HashMap::Entry* p = native_groups_.Start();
-       p != NULL;
+  for (base::HashMap::Entry* p = native_groups_.Start(); p != NULL;
        p = native_groups_.Next(p)) {
     v8::RetainedObjectInfo* info =
         reinterpret_cast<v8::RetainedObjectInfo*>(p->value);
@@ -2344,7 +2327,8 @@
 
 List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo(
     v8::RetainedObjectInfo* info) {
-  HashMap::Entry* entry = objects_by_info_.LookupOrInsert(info, InfoHash(info));
+  base::HashMap::Entry* entry =
+      objects_by_info_.LookupOrInsert(info, InfoHash(info));
   if (entry->value != NULL) {
     info->Dispose();
   } else {
@@ -2360,8 +2344,7 @@
   FillRetainedObjects();
   FillImplicitReferences();
   if (EstimateObjectsCount() > 0) {
-    for (HashMap::Entry* p = objects_by_info_.Start();
-         p != NULL;
+    for (base::HashMap::Entry* p = objects_by_info_.Start(); p != NULL;
          p = objects_by_info_.Next(p)) {
       v8::RetainedObjectInfo* info =
           reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
@@ -2413,7 +2396,7 @@
       label_copy,
       static_cast<int>(strlen(label_copy)),
       isolate_->heap()->HashSeed());
-  HashMap::Entry* entry =
+  base::HashMap::Entry* entry =
       native_groups_.LookupOrInsert(const_cast<char*>(label_copy), hash);
   if (entry->value == NULL) {
     entry->value = new NativeGroupRetainedObjectInfo(label);
@@ -2459,8 +2442,7 @@
 
 
 void NativeObjectsExplorer::SetRootNativeRootsReference() {
-  for (HashMap::Entry* entry = native_groups_.Start();
-       entry;
+  for (base::HashMap::Entry* entry = native_groups_.Start(); entry;
        entry = native_groups_.Next(entry)) {
     NativeGroupRetainedObjectInfo* group_info =
         static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
@@ -2728,7 +2710,7 @@
 
 
 int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
-  HashMap::Entry* cache_entry =
+  base::HashMap::Entry* cache_entry =
       strings_.LookupOrInsert(const_cast<char*>(s), StringHash(s));
   if (cache_entry->value == NULL) {
     cache_entry->value = reinterpret_cast<void*>(next_string_id_++);
@@ -3113,8 +3095,7 @@
 void HeapSnapshotJSONSerializer::SerializeStrings() {
   ScopedVector<const unsigned char*> sorted_strings(
       strings_.occupancy() + 1);
-  for (HashMap::Entry* entry = strings_.Start();
-       entry != NULL;
+  for (base::HashMap::Entry* entry = strings_.Start(); entry != NULL;
        entry = strings_.Next(entry)) {
     int index = static_cast<int>(reinterpret_cast<uintptr_t>(entry->value));
     sorted_strings[index] = reinterpret_cast<const unsigned char*>(entry->key);
diff --git a/src/profiler/heap-snapshot-generator.h b/src/profiler/heap-snapshot-generator.h
index 255f61d..a6bc385 100644
--- a/src/profiler/heap-snapshot-generator.h
+++ b/src/profiler/heap-snapshot-generator.h
@@ -259,7 +259,7 @@
   };
 
   SnapshotObjectId next_id_;
-  HashMap entries_map_;
+  base::HashMap entries_map_;
   List<EntryInfo> entries_;
   List<TimeInterval> time_intervals_;
   Heap* heap_;
@@ -297,7 +297,7 @@
         v8::internal::kZeroHashSeed);
   }
 
-  HashMap entries_;
+  base::HashMap entries_;
 
   friend class HeapObjectsSet;
 
@@ -316,7 +316,7 @@
   bool is_empty() const { return entries_.occupancy() == 0; }
 
  private:
-  HashMap entries_;
+  base::HashMap entries_;
 
   DISALLOW_COPY_AND_ASSIGN(HeapObjectsSet);
 };
@@ -521,8 +521,8 @@
   bool embedder_queried_;
   HeapObjectsSet in_groups_;
   // RetainedObjectInfo* -> List<HeapObject*>*
-  HashMap objects_by_info_;
-  HashMap native_groups_;
+  base::HashMap objects_by_info_;
+  base::HashMap native_groups_;
   HeapEntriesAllocator* synthetic_entries_allocator_;
   HeapEntriesAllocator* native_entries_allocator_;
   // Used during references extraction.
@@ -609,7 +609,7 @@
   static const int kNodeFieldsCount;
 
   HeapSnapshot* snapshot_;
-  HashMap strings_;
+  base::HashMap strings_;
   int next_node_id_;
   int next_string_id_;
   OutputStreamWriter* writer_;
diff --git a/src/profiler/profile-generator-inl.h b/src/profiler/profile-generator-inl.h
index 85edce2..0bb17e2 100644
--- a/src/profiler/profile-generator-inl.h
+++ b/src/profiler/profile-generator-inl.h
@@ -10,7 +10,7 @@
 namespace v8 {
 namespace internal {
 
-CodeEntry::CodeEntry(Logger::LogEventsAndTags tag, const char* name,
+CodeEntry::CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
                      const char* name_prefix, const char* resource_name,
                      int line_number, int column_number,
                      JITLineInfoTable* line_info, Address instruction_start)
@@ -29,7 +29,6 @@
       line_info_(line_info),
       instruction_start_(instruction_start) {}
 
-
 ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry)
     : tree_(tree),
       entry_(entry),
diff --git a/src/profiler/profile-generator.cc b/src/profiler/profile-generator.cc
index b07601f..d40cf2a 100644
--- a/src/profiler/profile-generator.cc
+++ b/src/profiler/profile-generator.cc
@@ -9,9 +9,9 @@
 #include "src/debug/debug.h"
 #include "src/deoptimizer.h"
 #include "src/global-handles.h"
+#include "src/profiler/cpu-profiler.h"
 #include "src/profiler/profile-generator-inl.h"
 #include "src/profiler/tick-sample.h"
-#include "src/splay-tree-inl.h"
 #include "src/unicode.h"
 
 namespace v8 {
@@ -48,6 +48,41 @@
 const char* const CodeEntry::kEmptyBailoutReason = "";
 const char* const CodeEntry::kNoDeoptReason = "";
 
+const char* const CodeEntry::kProgramEntryName = "(program)";
+const char* const CodeEntry::kIdleEntryName = "(idle)";
+const char* const CodeEntry::kGarbageCollectorEntryName = "(garbage collector)";
+const char* const CodeEntry::kUnresolvedFunctionName = "(unresolved function)";
+
+base::LazyDynamicInstance<CodeEntry, CodeEntry::ProgramEntryCreateTrait>::type
+    CodeEntry::kProgramEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+base::LazyDynamicInstance<CodeEntry, CodeEntry::IdleEntryCreateTrait>::type
+    CodeEntry::kIdleEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+base::LazyDynamicInstance<CodeEntry, CodeEntry::GCEntryCreateTrait>::type
+    CodeEntry::kGCEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+base::LazyDynamicInstance<CodeEntry,
+                          CodeEntry::UnresolvedEntryCreateTrait>::type
+    CodeEntry::kUnresolvedEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+CodeEntry* CodeEntry::ProgramEntryCreateTrait::Create() {
+  return new CodeEntry(Logger::FUNCTION_TAG, CodeEntry::kProgramEntryName);
+}
+
+CodeEntry* CodeEntry::IdleEntryCreateTrait::Create() {
+  return new CodeEntry(Logger::FUNCTION_TAG, CodeEntry::kIdleEntryName);
+}
+
+CodeEntry* CodeEntry::GCEntryCreateTrait::Create() {
+  return new CodeEntry(Logger::BUILTIN_TAG,
+                       CodeEntry::kGarbageCollectorEntryName);
+}
+
+CodeEntry* CodeEntry::UnresolvedEntryCreateTrait::Create() {
+  return new CodeEntry(Logger::FUNCTION_TAG,
+                       CodeEntry::kUnresolvedFunctionName);
+}
 
 CodeEntry::~CodeEntry() {
   delete line_info_;
@@ -94,7 +129,7 @@
 
 
 void CodeEntry::SetBuiltinId(Builtins::Name id) {
-  bit_field_ = TagField::update(bit_field_, Logger::BUILTIN_TAG);
+  bit_field_ = TagField::update(bit_field_, CodeEventListener::BUILTIN_TAG);
   bit_field_ = BuiltinIdField::update(bit_field_, id);
 }
 
@@ -170,14 +205,15 @@
 
 
 ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
-  HashMap::Entry* map_entry = children_.Lookup(entry, CodeEntryHash(entry));
+  base::HashMap::Entry* map_entry =
+      children_.Lookup(entry, CodeEntryHash(entry));
   return map_entry != NULL ?
       reinterpret_cast<ProfileNode*>(map_entry->value) : NULL;
 }
 
 
 ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
-  HashMap::Entry* map_entry =
+  base::HashMap::Entry* map_entry =
       children_.LookupOrInsert(entry, CodeEntryHash(entry));
   ProfileNode* node = reinterpret_cast<ProfileNode*>(map_entry->value);
   if (node == NULL) {
@@ -194,7 +230,7 @@
   if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) return;
   // Increment a hit counter of a certain source line.
   // Add a new source line if not found.
-  HashMap::Entry* e =
+  base::HashMap::Entry* e =
       line_ticks_.LookupOrInsert(reinterpret_cast<void*>(src_line), src_line);
   DCHECK(e);
   e->value = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(e->value) + 1);
@@ -212,7 +248,7 @@
 
   v8::CpuProfileNode::LineTick* entry = entries;
 
-  for (HashMap::Entry* p = line_ticks_.Start(); p != NULL;
+  for (base::HashMap::Entry *p = line_ticks_.Start(); p != NULL;
        p = line_ticks_.Next(p), entry++) {
     entry->line =
         static_cast<unsigned int>(reinterpret_cast<uintptr_t>(p->key));
@@ -250,8 +286,7 @@
     base::OS::Print("%*s bailed out due to '%s'\n", indent + 10, "",
                     bailout_reason);
   }
-  for (HashMap::Entry* p = children_.Start();
-       p != NULL;
+  for (base::HashMap::Entry* p = children_.Start(); p != NULL;
        p = children_.Next(p)) {
     reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
   }
@@ -269,16 +304,14 @@
   void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
 };
 
-
 ProfileTree::ProfileTree(Isolate* isolate)
-    : root_entry_(Logger::FUNCTION_TAG, "(root)"),
+    : root_entry_(CodeEventListener::FUNCTION_TAG, "(root)"),
       next_node_id_(1),
       root_(new ProfileNode(this, &root_entry_)),
       isolate_(isolate),
       next_function_id_(1),
       function_ids_(ProfileNode::CodeEntriesMatch) {}
 
-
 ProfileTree::~ProfileTree() {
   DeleteNodesCallback cb;
   TraverseDepthFirst(&cb);
@@ -287,7 +320,7 @@
 
 unsigned ProfileTree::GetFunctionId(const ProfileNode* node) {
   CodeEntry* code_entry = node->entry();
-  HashMap::Entry* entry =
+  base::HashMap::Entry* entry =
       function_ids_.LookupOrInsert(code_entry, code_entry->GetHash());
   if (!entry->value) {
     entry->value = reinterpret_cast<void*>(next_function_id_++);
@@ -366,12 +399,13 @@
   }
 }
 
-
-CpuProfile::CpuProfile(Isolate* isolate, const char* title, bool record_samples)
+CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
+                       bool record_samples)
     : title_(title),
       record_samples_(record_samples),
       start_time_(base::TimeTicks::HighResolutionNow()),
-      top_down_(isolate) {}
+      top_down_(profiler->isolate()),
+      profiler_(profiler) {}
 
 void CpuProfile::AddPath(base::TimeTicks timestamp,
                          const std::vector<CodeEntry*>& path, int src_line,
@@ -384,92 +418,60 @@
   }
 }
 
-
 void CpuProfile::CalculateTotalTicksAndSamplingRate() {
   end_time_ = base::TimeTicks::HighResolutionNow();
 }
 
-
 void CpuProfile::Print() {
   base::OS::Print("[Top down]:\n");
   top_down_.Print();
 }
 
-
-CodeMap::~CodeMap() {}
-
-
-const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
-
-
 void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
   DeleteAllCoveredCode(addr, addr + size);
-  CodeTree::Locator locator;
-  tree_.Insert(addr, &locator);
-  locator.set_value(CodeEntryInfo(entry, size));
+  code_map_.insert({addr, CodeEntryInfo(entry, size)});
 }
 
-
 void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
-  List<Address> to_delete;
-  Address addr = end - 1;
-  while (addr >= start) {
-    CodeTree::Locator locator;
-    if (!tree_.FindGreatestLessThan(addr, &locator)) break;
-    Address start2 = locator.key(), end2 = start2 + locator.value().size;
-    if (start2 < end && start < end2) to_delete.Add(start2);
-    addr = start2 - 1;
+  auto left = code_map_.upper_bound(start);
+  if (left != code_map_.begin()) {
+    --left;
+    if (left->first + left->second.size <= start) ++left;
   }
-  for (int i = 0; i < to_delete.length(); ++i) tree_.Remove(to_delete[i]);
+  auto right = left;
+  while (right != code_map_.end() && right->first < end) ++right;
+  code_map_.erase(left, right);
 }
 
-
 CodeEntry* CodeMap::FindEntry(Address addr) {
-  CodeTree::Locator locator;
-  if (tree_.FindGreatestLessThan(addr, &locator)) {
-    // locator.key() <= addr. Need to check that addr is within entry.
-    const CodeEntryInfo& entry = locator.value();
-    if (addr < (locator.key() + entry.size)) {
-      return entry.entry;
-    }
-  }
-  return NULL;
+  auto it = code_map_.upper_bound(addr);
+  if (it == code_map_.begin()) return nullptr;
+  --it;
+  Address end_address = it->first + it->second.size;
+  return addr < end_address ? it->second.entry : nullptr;
 }
 
-
 void CodeMap::MoveCode(Address from, Address to) {
   if (from == to) return;
-  CodeTree::Locator locator;
-  if (!tree_.Find(from, &locator)) return;
-  CodeEntryInfo entry = locator.value();
-  tree_.Remove(from);
-  AddCode(to, entry.entry, entry.size);
+  auto it = code_map_.find(from);
+  if (it == code_map_.end()) return;
+  CodeEntryInfo info = it->second;
+  code_map_.erase(it);
+  AddCode(to, info.entry, info.size);
 }
 
-
-void CodeMap::CodeTreePrinter::Call(
-    const Address& key, const CodeMap::CodeEntryInfo& value) {
-  base::OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
-}
-
-
 void CodeMap::Print() {
-  CodeTreePrinter printer;
-  tree_.ForEach(&printer);
+  for (auto it = code_map_.begin(); it != code_map_.end(); ++it) {
+    base::OS::Print("%p %5d %s\n", static_cast<void*>(it->first),
+                    it->second.size, it->second.entry->name());
+  }
 }
 
-
-CpuProfilesCollection::CpuProfilesCollection(Heap* heap)
-    : function_and_resource_names_(heap),
-      isolate_(heap->isolate()),
+CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
+    : resource_names_(isolate->heap()),
+      profiler_(nullptr),
       current_profiles_semaphore_(1) {}
 
-
-static void DeleteCodeEntry(CodeEntry** entry_ptr) {
-  delete *entry_ptr;
-}
-
-
 static void DeleteCpuProfile(CpuProfile** profile_ptr) {
   delete *profile_ptr;
 }
@@ -478,7 +480,6 @@
 CpuProfilesCollection::~CpuProfilesCollection() {
   finished_profiles_.Iterate(DeleteCpuProfile);
   current_profiles_.Iterate(DeleteCpuProfile);
-  code_entries_.Iterate(DeleteCodeEntry);
 }
 
 
@@ -497,7 +498,7 @@
       return true;
     }
   }
-  current_profiles_.Add(new CpuProfile(isolate_, title, record_samples));
+  current_profiles_.Add(new CpuProfile(profiler_, title, record_samples));
   current_profiles_semaphore_.Signal();
   return true;
 }
@@ -555,43 +556,8 @@
   current_profiles_semaphore_.Signal();
 }
 
-
-CodeEntry* CpuProfilesCollection::NewCodeEntry(
-    Logger::LogEventsAndTags tag, const char* name, const char* name_prefix,
-    const char* resource_name, int line_number, int column_number,
-    JITLineInfoTable* line_info, Address instruction_start) {
-  CodeEntry* code_entry =
-      new CodeEntry(tag, name, name_prefix, resource_name, line_number,
-                    column_number, line_info, instruction_start);
-  code_entries_.Add(code_entry);
-  return code_entry;
-}
-
-
-const char* const ProfileGenerator::kProgramEntryName =
-    "(program)";
-const char* const ProfileGenerator::kIdleEntryName =
-    "(idle)";
-const char* const ProfileGenerator::kGarbageCollectorEntryName =
-    "(garbage collector)";
-const char* const ProfileGenerator::kUnresolvedFunctionName =
-    "(unresolved function)";
-
-
 ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
-    : profiles_(profiles),
-      program_entry_(
-          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
-      idle_entry_(
-          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kIdleEntryName)),
-      gc_entry_(
-          profiles->NewCodeEntry(Logger::BUILTIN_TAG,
-                                 kGarbageCollectorEntryName)),
-      unresolved_entry_(
-          profiles->NewCodeEntry(Logger::FUNCTION_TAG,
-                                 kUnresolvedFunctionName)) {
-}
-
+    : profiles_(profiles) {}
 
 void ProfileGenerator::RecordTickSample(const TickSample& sample) {
   std::vector<CodeEntry*> entries;
@@ -607,9 +573,8 @@
   int src_line = v8::CpuProfileNode::kNoLineNumberInfo;
   bool src_line_not_found = true;
 
-  if (sample.pc != NULL) {
-    if (sample.has_external_callback && sample.state == EXTERNAL &&
-        sample.top_frame_type == StackFrame::EXIT) {
+  if (sample.pc != nullptr) {
+    if (sample.has_external_callback && sample.state == EXTERNAL) {
       // Don't use PC when in external callback code, as it can point
       // inside callback's code, and we will erroneously report
       // that a callback calls itself.
@@ -619,9 +584,7 @@
       // If there is no pc_entry we're likely in native code.
       // Find out, if top of stack was pointing inside a JS function
       // meaning that we have encountered a frameless invocation.
-      if (!pc_entry && (sample.top_frame_type == StackFrame::JAVA_SCRIPT ||
-                        sample.top_frame_type == StackFrame::INTERPRETED ||
-                        sample.top_frame_type == StackFrame::OPTIMIZED)) {
+      if (!pc_entry && !sample.has_external_callback) {
         pc_entry = code_map_.FindEntry(sample.tos);
       }
       // If pc is in the function code before it set up stack frame or after the
@@ -646,8 +609,8 @@
           // In the latter case we know the caller for sure but in the
           // former case we don't so we simply replace the frame with
           // 'unresolved' entry.
-          if (sample.top_frame_type == StackFrame::JAVA_SCRIPT) {
-            entries.push_back(unresolved_entry_);
+          if (!sample.has_external_callback) {
+            entries.push_back(CodeEntry::unresolved_entry());
           }
         }
       }
@@ -704,7 +667,7 @@
 CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
   switch (tag) {
     case GC:
-      return gc_entry_;
+      return CodeEntry::gc_entry();
     case JS:
     case COMPILER:
     // DOM events handlers are reported as OTHER / EXTERNAL entries.
@@ -712,9 +675,9 @@
     // one bucket.
     case OTHER:
     case EXTERNAL:
-      return program_entry_;
+      return CodeEntry::program_entry();
     case IDLE:
-      return idle_entry_;
+      return CodeEntry::idle_entry();
     default: return NULL;
   }
 }
diff --git a/src/profiler/profile-generator.h b/src/profiler/profile-generator.h
index 5c017e1..fdd87f3 100644
--- a/src/profiler/profile-generator.h
+++ b/src/profiler/profile-generator.h
@@ -8,8 +8,8 @@
 #include <map>
 #include "include/v8-profiler.h"
 #include "src/allocation.h"
+#include "src/base/hashmap.h"
 #include "src/compiler.h"
-#include "src/hashmap.h"
 #include "src/profiler/strings-storage.h"
 
 namespace v8 {
@@ -38,7 +38,7 @@
 class CodeEntry {
  public:
   // CodeEntry doesn't own name strings, just references them.
-  inline CodeEntry(Logger::LogEventsAndTags tag, const char* name,
+  inline CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
                    const char* name_prefix = CodeEntry::kEmptyNamePrefix,
                    const char* resource_name = CodeEntry::kEmptyResourceName,
                    int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
@@ -105,16 +105,56 @@
   bool HasDeoptInlinedFramesFor(int deopt_id) const;
 
   Address instruction_start() const { return instruction_start_; }
-  Logger::LogEventsAndTags tag() const { return TagField::decode(bit_field_); }
+  CodeEventListener::LogEventsAndTags tag() const {
+    return TagField::decode(bit_field_);
+  }
 
   static const char* const kEmptyNamePrefix;
   static const char* const kEmptyResourceName;
   static const char* const kEmptyBailoutReason;
   static const char* const kNoDeoptReason;
 
+  static const char* const kProgramEntryName;
+  static const char* const kIdleEntryName;
+  static const char* const kGarbageCollectorEntryName;
+  // Used to represent frames for which we have no reliable way to
+  // detect function.
+  static const char* const kUnresolvedFunctionName;
+
+  V8_INLINE static CodeEntry* program_entry() {
+    return kProgramEntry.Pointer();
+  }
+  V8_INLINE static CodeEntry* idle_entry() { return kIdleEntry.Pointer(); }
+  V8_INLINE static CodeEntry* gc_entry() { return kGCEntry.Pointer(); }
+  V8_INLINE static CodeEntry* unresolved_entry() {
+    return kUnresolvedEntry.Pointer();
+  }
+
  private:
+  struct ProgramEntryCreateTrait {
+    static CodeEntry* Create();
+  };
+  struct IdleEntryCreateTrait {
+    static CodeEntry* Create();
+  };
+  struct GCEntryCreateTrait {
+    static CodeEntry* Create();
+  };
+  struct UnresolvedEntryCreateTrait {
+    static CodeEntry* Create();
+  };
+
+  static base::LazyDynamicInstance<CodeEntry, ProgramEntryCreateTrait>::type
+      kProgramEntry;
+  static base::LazyDynamicInstance<CodeEntry, IdleEntryCreateTrait>::type
+      kIdleEntry;
+  static base::LazyDynamicInstance<CodeEntry, GCEntryCreateTrait>::type
+      kGCEntry;
+  static base::LazyDynamicInstance<CodeEntry, UnresolvedEntryCreateTrait>::type
+      kUnresolvedEntry;
+
   class TagField : public BitField<Logger::LogEventsAndTags, 0, 8> {};
-  class BuiltinIdField : public BitField<Builtins::Name, 8, 8> {};
+  class BuiltinIdField : public BitField<Builtins::Name, 8, 24> {};
 
   uint32_t bit_field_;
   const char* name_prefix_;
@@ -180,10 +220,10 @@
   CodeEntry* entry_;
   unsigned self_ticks_;
   // Mapping from CodeEntry* to ProfileNode*
-  HashMap children_;
+  base::HashMap children_;
   List<ProfileNode*> children_list_;
   unsigned id_;
-  HashMap line_ticks_;
+  base::HashMap line_ticks_;
 
   std::vector<CpuProfileDeoptInfo> deopt_infos_;
 
@@ -220,7 +260,7 @@
   Isolate* isolate_;
 
   unsigned next_function_id_;
-  HashMap function_ids_;
+  base::HashMap function_ids_;
 
   DISALLOW_COPY_AND_ASSIGN(ProfileTree);
 };
@@ -228,7 +268,7 @@
 
 class CpuProfile {
  public:
-  CpuProfile(Isolate* isolate, const char* title, bool record_samples);
+  CpuProfile(CpuProfiler* profiler, const char* title, bool record_samples);
 
   // Add pc -> ... -> main() call path to the profile.
   void AddPath(base::TimeTicks timestamp, const std::vector<CodeEntry*>& path,
@@ -246,6 +286,7 @@
 
   base::TimeTicks start_time() const { return start_time_; }
   base::TimeTicks end_time() const { return end_time_; }
+  CpuProfiler* cpu_profiler() const { return profiler_; }
 
   void UpdateTicksScale();
 
@@ -259,20 +300,18 @@
   List<ProfileNode*> samples_;
   List<base::TimeTicks> timestamps_;
   ProfileTree top_down_;
+  CpuProfiler* const profiler_;
 
   DISALLOW_COPY_AND_ASSIGN(CpuProfile);
 };
 
-
 class CodeMap {
  public:
   CodeMap() {}
-  ~CodeMap();
+
   void AddCode(Address addr, CodeEntry* entry, unsigned size);
   void MoveCode(Address from, Address to);
   CodeEntry* FindEntry(Address addr);
-  int GetSharedId(Address addr);
-
   void Print();
 
  private:
@@ -283,61 +322,26 @@
     unsigned size;
   };
 
-  struct CodeTreeConfig {
-    typedef Address Key;
-    typedef CodeEntryInfo Value;
-    static const Key kNoKey;
-    static const Value NoValue() { return CodeEntryInfo(NULL, 0); }
-    static int Compare(const Key& a, const Key& b) {
-      return a < b ? -1 : (a > b ? 1 : 0);
-    }
-  };
-  typedef SplayTree<CodeTreeConfig> CodeTree;
-
-  class CodeTreePrinter {
-   public:
-    void Call(const Address& key, const CodeEntryInfo& value);
-  };
-
   void DeleteAllCoveredCode(Address start, Address end);
 
-  CodeTree tree_;
+  std::map<Address, CodeEntryInfo> code_map_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeMap);
 };
 
-
 class CpuProfilesCollection {
  public:
-  explicit CpuProfilesCollection(Heap* heap);
+  explicit CpuProfilesCollection(Isolate* isolate);
   ~CpuProfilesCollection();
 
+  void set_cpu_profiler(CpuProfiler* profiler) { profiler_ = profiler; }
   bool StartProfiling(const char* title, bool record_samples);
   CpuProfile* StopProfiling(const char* title);
   List<CpuProfile*>* profiles() { return &finished_profiles_; }
-  const char* GetName(Name* name) {
-    return function_and_resource_names_.GetName(name);
-  }
-  const char* GetName(int args_count) {
-    return function_and_resource_names_.GetName(args_count);
-  }
-  const char* GetFunctionName(Name* name) {
-    return function_and_resource_names_.GetFunctionName(name);
-  }
-  const char* GetFunctionName(const char* name) {
-    return function_and_resource_names_.GetFunctionName(name);
-  }
+  const char* GetName(Name* name) { return resource_names_.GetName(name); }
   bool IsLastProfile(const char* title);
   void RemoveProfile(CpuProfile* profile);
 
-  CodeEntry* NewCodeEntry(
-      Logger::LogEventsAndTags tag, const char* name,
-      const char* name_prefix = CodeEntry::kEmptyNamePrefix,
-      const char* resource_name = CodeEntry::kEmptyResourceName,
-      int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
-      int column_number = v8::CpuProfileNode::kNoColumnNumberInfo,
-      JITLineInfoTable* line_info = NULL, Address instruction_start = NULL);
-
   // Called from profile generator thread.
   void AddPathToCurrentProfiles(base::TimeTicks timestamp,
                                 const std::vector<CodeEntry*>& path,
@@ -347,11 +351,9 @@
   static const int kMaxSimultaneousProfiles = 100;
 
  private:
-  StringsStorage function_and_resource_names_;
-  List<CodeEntry*> code_entries_;
+  StringsStorage resource_names_;
   List<CpuProfile*> finished_profiles_;
-
-  Isolate* isolate_;
+  CpuProfiler* profiler_;
 
   // Accessed by VM thread and profile generator thread.
   List<CpuProfile*> current_profiles_;
@@ -369,22 +371,11 @@
 
   CodeMap* code_map() { return &code_map_; }
 
-  static const char* const kProgramEntryName;
-  static const char* const kIdleEntryName;
-  static const char* const kGarbageCollectorEntryName;
-  // Used to represent frames for which we have no reliable way to
-  // detect function.
-  static const char* const kUnresolvedFunctionName;
-
  private:
   CodeEntry* EntryForVMState(StateTag tag);
 
   CpuProfilesCollection* profiles_;
   CodeMap code_map_;
-  CodeEntry* program_entry_;
-  CodeEntry* idle_entry_;
-  CodeEntry* gc_entry_;
-  CodeEntry* unresolved_entry_;
 
   DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
 };
diff --git a/src/profiler/profiler-listener.cc b/src/profiler/profiler-listener.cc
new file mode 100644
index 0000000..2b353e7
--- /dev/null
+++ b/src/profiler/profiler-listener.cc
@@ -0,0 +1,339 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/profiler/profiler-listener.h"
+
+#include "src/deoptimizer.h"
+#include "src/interpreter/source-position-table.h"
+#include "src/profiler/cpu-profiler.h"
+#include "src/profiler/profile-generator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+ProfilerListener::ProfilerListener(Isolate* isolate)
+    : function_and_resource_names_(isolate->heap()) {}
+
+ProfilerListener::~ProfilerListener() {
+  for (auto code_entry : code_entries_) {
+    delete code_entry;
+  }
+}
+
+void ProfilerListener::CallbackEvent(Name* name, Address entry_point) {
+  CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+  rec->start = entry_point;
+  rec->entry = NewCodeEntry(CodeEventListener::CALLBACK_TAG, GetName(name));
+  rec->size = 1;
+  DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+                                       AbstractCode* code, const char* name) {
+  CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+  rec->start = code->address();
+  rec->entry = NewCodeEntry(
+      tag, GetFunctionName(name), CodeEntry::kEmptyNamePrefix,
+      CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
+      CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+  RecordInliningInfo(rec->entry, code);
+  rec->size = code->ExecutableSize();
+  DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+                                       AbstractCode* code, Name* name) {
+  CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+  rec->start = code->address();
+  rec->entry = NewCodeEntry(
+      tag, GetFunctionName(name), CodeEntry::kEmptyNamePrefix,
+      CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
+      CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+  RecordInliningInfo(rec->entry, code);
+  rec->size = code->ExecutableSize();
+  DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+                                       AbstractCode* code,
+                                       SharedFunctionInfo* shared,
+                                       Name* script_name) {
+  CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+  rec->start = code->address();
+  rec->entry = NewCodeEntry(
+      tag, GetFunctionName(shared->DebugName()), CodeEntry::kEmptyNamePrefix,
+      GetName(InferScriptName(script_name, shared)),
+      CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
+      NULL, code->instruction_start());
+  RecordInliningInfo(rec->entry, code);
+  rec->entry->FillFunctionInfo(shared);
+  rec->size = code->ExecutableSize();
+  DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+                                       AbstractCode* abstract_code,
+                                       SharedFunctionInfo* shared,
+                                       Name* script_name, int line,
+                                       int column) {
+  CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+  rec->start = abstract_code->address();
+  Script* script = Script::cast(shared->script());
+  JITLineInfoTable* line_table = NULL;
+  if (script) {
+    if (abstract_code->IsCode()) {
+      Code* code = abstract_code->GetCode();
+      int start_position = shared->start_position();
+      int end_position = shared->end_position();
+      line_table = new JITLineInfoTable();
+      for (RelocIterator it(code); !it.done(); it.next()) {
+        RelocInfo* reloc_info = it.rinfo();
+        if (!RelocInfo::IsPosition(reloc_info->rmode())) continue;
+        int position = static_cast<int>(reloc_info->data());
+        // TODO(alph): in case of inlining the position may correspond
+        // to an inlined function source code. Do not collect positions
+        // that fall beyond the function source code. There's however a
+        // chance the inlined function has similar positions but in another
+        // script. So the proper fix is to store script_id in some form
+        // along with the inlined function positions.
+        if (position < start_position || position >= end_position) continue;
+        int pc_offset = static_cast<int>(reloc_info->pc() - code->address());
+        int line_number = script->GetLineNumber(position) + 1;
+        line_table->SetPosition(pc_offset, line_number);
+      }
+    } else {
+      BytecodeArray* bytecode = abstract_code->GetBytecodeArray();
+      line_table = new JITLineInfoTable();
+      interpreter::SourcePositionTableIterator it(
+          bytecode->source_position_table());
+      for (; !it.done(); it.Advance()) {
+        int line_number = script->GetLineNumber(it.source_position()) + 1;
+        int pc_offset = it.bytecode_offset() + BytecodeArray::kHeaderSize;
+        line_table->SetPosition(pc_offset, line_number);
+      }
+    }
+  }
+  rec->entry = NewCodeEntry(
+      tag, GetFunctionName(shared->DebugName()), CodeEntry::kEmptyNamePrefix,
+      GetName(InferScriptName(script_name, shared)), line, column, line_table,
+      abstract_code->instruction_start());
+  RecordInliningInfo(rec->entry, abstract_code);
+  RecordDeoptInlinedFrames(rec->entry, abstract_code);
+  rec->entry->FillFunctionInfo(shared);
+  rec->size = abstract_code->ExecutableSize();
+  DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+                                       AbstractCode* code, int args_count) {
+  CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+  rec->start = code->address();
+  rec->entry = NewCodeEntry(
+      tag, GetName(args_count), "args_count: ", CodeEntry::kEmptyResourceName,
+      CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
+      NULL, code->instruction_start());
+  RecordInliningInfo(rec->entry, code);
+  rec->size = code->ExecutableSize();
+  DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeMoveEvent(AbstractCode* from, Address to) {
+  CodeEventsContainer evt_rec(CodeEventRecord::CODE_MOVE);
+  CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
+  rec->from = from->address();
+  rec->to = to;
+  DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeDisableOptEvent(AbstractCode* code,
+                                           SharedFunctionInfo* shared) {
+  CodeEventsContainer evt_rec(CodeEventRecord::CODE_DISABLE_OPT);
+  CodeDisableOptEventRecord* rec = &evt_rec.CodeDisableOptEventRecord_;
+  rec->start = code->address();
+  rec->bailout_reason = GetBailoutReason(shared->disable_optimization_reason());
+  DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeDeoptEvent(Code* code, Address pc,
+                                      int fp_to_sp_delta) {
+  CodeEventsContainer evt_rec(CodeEventRecord::CODE_DEOPT);
+  CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
+  Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, pc);
+  rec->start = code->address();
+  rec->deopt_reason = Deoptimizer::GetDeoptReason(info.deopt_reason);
+  rec->position = info.position;
+  rec->deopt_id = info.deopt_id;
+  rec->pc = reinterpret_cast<void*>(pc);
+  rec->fp_to_sp_delta = fp_to_sp_delta;
+  DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::GetterCallbackEvent(Name* name, Address entry_point) {
+  CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+  rec->start = entry_point;
+  rec->entry =
+      NewCodeEntry(CodeEventListener::CALLBACK_TAG, GetName(name), "get ");
+  rec->size = 1;
+  DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::RegExpCodeCreateEvent(AbstractCode* code,
+                                             String* source) {
+  CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+  rec->start = code->address();
+  rec->entry = NewCodeEntry(
+      CodeEventListener::REG_EXP_TAG, GetName(source), "RegExp: ",
+      CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
+      CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+  rec->size = code->ExecutableSize();
+  DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::SetterCallbackEvent(Name* name, Address entry_point) {
+  CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+  rec->start = entry_point;
+  rec->entry =
+      NewCodeEntry(CodeEventListener::CALLBACK_TAG, GetName(name), "set ");
+  rec->size = 1;
+  DispatchCodeEvent(evt_rec);
+}
+
+Name* ProfilerListener::InferScriptName(Name* name, SharedFunctionInfo* info) {
+  if (name->IsString() && String::cast(name)->length()) return name;
+  if (!info->script()->IsScript()) return name;
+  Object* source_url = Script::cast(info->script())->source_url();
+  return source_url->IsName() ? Name::cast(source_url) : name;
+}
+
+void ProfilerListener::RecordInliningInfo(CodeEntry* entry,
+                                          AbstractCode* abstract_code) {
+  if (!abstract_code->IsCode()) return;
+  Code* code = abstract_code->GetCode();
+  if (code->kind() != Code::OPTIMIZED_FUNCTION) return;
+  DeoptimizationInputData* deopt_input_data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
+  int deopt_count = deopt_input_data->DeoptCount();
+  for (int i = 0; i < deopt_count; i++) {
+    int pc_offset = deopt_input_data->Pc(i)->value();
+    if (pc_offset == -1) continue;
+    int translation_index = deopt_input_data->TranslationIndex(i)->value();
+    TranslationIterator it(deopt_input_data->TranslationByteArray(),
+                           translation_index);
+    Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+    DCHECK_EQ(Translation::BEGIN, opcode);
+    it.Skip(Translation::NumberOfOperandsFor(opcode));
+    int depth = 0;
+    std::vector<CodeEntry*> inline_stack;
+    while (it.HasNext() &&
+           Translation::BEGIN !=
+               (opcode = static_cast<Translation::Opcode>(it.Next()))) {
+      if (opcode != Translation::JS_FRAME &&
+          opcode != Translation::INTERPRETED_FRAME) {
+        it.Skip(Translation::NumberOfOperandsFor(opcode));
+        continue;
+      }
+      it.Next();  // Skip ast_id
+      int shared_info_id = it.Next();
+      it.Next();  // Skip height
+      SharedFunctionInfo* shared_info = SharedFunctionInfo::cast(
+          deopt_input_data->LiteralArray()->get(shared_info_id));
+      if (!depth++) continue;  // Skip the current function itself.
+      CodeEntry* inline_entry = new CodeEntry(
+          entry->tag(), GetFunctionName(shared_info->DebugName()),
+          CodeEntry::kEmptyNamePrefix, entry->resource_name(),
+          CpuProfileNode::kNoLineNumberInfo,
+          CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+      inline_entry->FillFunctionInfo(shared_info);
+      inline_stack.push_back(inline_entry);
+    }
+    if (!inline_stack.empty()) {
+      entry->AddInlineStack(pc_offset, inline_stack);
+      DCHECK(inline_stack.empty());
+    }
+  }
+}
+
+void ProfilerListener::RecordDeoptInlinedFrames(CodeEntry* entry,
+                                                AbstractCode* abstract_code) {
+  if (abstract_code->kind() != AbstractCode::OPTIMIZED_FUNCTION) return;
+  Code* code = abstract_code->GetCode();
+  DeoptimizationInputData* deopt_input_data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
+  int const mask = RelocInfo::ModeMask(RelocInfo::DEOPT_ID);
+  for (RelocIterator rit(code, mask); !rit.done(); rit.next()) {
+    RelocInfo* reloc_info = rit.rinfo();
+    DCHECK(RelocInfo::IsDeoptId(reloc_info->rmode()));
+    int deopt_id = static_cast<int>(reloc_info->data());
+    int translation_index =
+        deopt_input_data->TranslationIndex(deopt_id)->value();
+    TranslationIterator it(deopt_input_data->TranslationByteArray(),
+                           translation_index);
+    Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+    DCHECK_EQ(Translation::BEGIN, opcode);
+    it.Skip(Translation::NumberOfOperandsFor(opcode));
+    std::vector<CodeEntry::DeoptInlinedFrame> inlined_frames;
+    while (it.HasNext() &&
+           Translation::BEGIN !=
+               (opcode = static_cast<Translation::Opcode>(it.Next()))) {
+      if (opcode != Translation::JS_FRAME &&
+          opcode != Translation::INTERPRETED_FRAME) {
+        it.Skip(Translation::NumberOfOperandsFor(opcode));
+        continue;
+      }
+      BailoutId ast_id = BailoutId(it.Next());
+      int shared_info_id = it.Next();
+      it.Next();  // Skip height
+      SharedFunctionInfo* shared = SharedFunctionInfo::cast(
+          deopt_input_data->LiteralArray()->get(shared_info_id));
+      int source_position = Deoptimizer::ComputeSourcePosition(shared, ast_id);
+      int script_id = v8::UnboundScript::kNoScriptId;
+      if (shared->script()->IsScript()) {
+        Script* script = Script::cast(shared->script());
+        script_id = script->id();
+      }
+      CodeEntry::DeoptInlinedFrame frame = {source_position, script_id};
+      inlined_frames.push_back(frame);
+    }
+    if (!inlined_frames.empty() && !entry->HasDeoptInlinedFramesFor(deopt_id)) {
+      entry->AddDeoptInlinedFrames(deopt_id, inlined_frames);
+      DCHECK(inlined_frames.empty());
+    }
+  }
+}
+
+CodeEntry* ProfilerListener::NewCodeEntry(
+    CodeEventListener::LogEventsAndTags tag, const char* name,
+    const char* name_prefix, const char* resource_name, int line_number,
+    int column_number, JITLineInfoTable* line_info, Address instruction_start) {
+  CodeEntry* code_entry =
+      new CodeEntry(tag, name, name_prefix, resource_name, line_number,
+                    column_number, line_info, instruction_start);
+  code_entries_.push_back(code_entry);
+  return code_entry;
+}
+
+void ProfilerListener::AddObserver(CodeEventObserver* observer) {
+  if (std::find(observers_.begin(), observers_.end(), observer) !=
+      observers_.end())
+    return;
+  observers_.push_back(observer);
+}
+
+void ProfilerListener::RemoveObserver(CodeEventObserver* observer) {
+  auto it = std::find(observers_.begin(), observers_.end(), observer);
+  if (it == observers_.end()) return;
+  observers_.erase(it);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/profiler/profiler-listener.h b/src/profiler/profiler-listener.h
new file mode 100644
index 0000000..7e24cea
--- /dev/null
+++ b/src/profiler/profiler-listener.h
@@ -0,0 +1,97 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROFILER_PROFILER_LISTENER_H_
+#define V8_PROFILER_PROFILER_LISTENER_H_
+
+#include <vector>
+
+#include "src/code-events.h"
+#include "src/profiler/profile-generator.h"
+
+namespace v8 {
+namespace internal {
+
+class CodeEventsContainer;
+
+class CodeEventObserver {
+ public:
+  virtual void CodeEventHandler(const CodeEventsContainer& evt_rec) = 0;
+  virtual ~CodeEventObserver() {}
+};
+
+class ProfilerListener : public CodeEventListener {
+ public:
+  explicit ProfilerListener(Isolate* isolate);
+  ~ProfilerListener() override;
+
+  void CallbackEvent(Name* name, Address entry_point) override;
+  void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+                       AbstractCode* code, const char* comment) override;
+  void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+                       AbstractCode* code, Name* name) override;
+  void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+                       AbstractCode* code, SharedFunctionInfo* shared,
+                       Name* script_name) override;
+  void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+                       AbstractCode* code, SharedFunctionInfo* shared,
+                       Name* script_name, int line, int column) override;
+  void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+                       AbstractCode* code, int args_count) override;
+  void CodeMovingGCEvent() override {}
+  void CodeMoveEvent(AbstractCode* from, Address to) override;
+  void CodeDisableOptEvent(AbstractCode* code,
+                           SharedFunctionInfo* shared) override;
+  void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta) override;
+  void GetterCallbackEvent(Name* name, Address entry_point) override;
+  void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
+  void SetterCallbackEvent(Name* name, Address entry_point) override;
+  void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
+
+  CodeEntry* NewCodeEntry(
+      CodeEventListener::LogEventsAndTags tag, const char* name,
+      const char* name_prefix = CodeEntry::kEmptyNamePrefix,
+      const char* resource_name = CodeEntry::kEmptyResourceName,
+      int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
+      int column_number = v8::CpuProfileNode::kNoColumnNumberInfo,
+      JITLineInfoTable* line_info = NULL, Address instruction_start = NULL);
+
+  void AddObserver(CodeEventObserver* observer);
+  void RemoveObserver(CodeEventObserver* observer);
+  V8_INLINE bool HasObservers() { return !observers_.empty(); }
+
+  const char* GetName(Name* name) {
+    return function_and_resource_names_.GetName(name);
+  }
+  const char* GetName(int args_count) {
+    return function_and_resource_names_.GetName(args_count);
+  }
+  const char* GetFunctionName(Name* name) {
+    return function_and_resource_names_.GetFunctionName(name);
+  }
+  const char* GetFunctionName(const char* name) {
+    return function_and_resource_names_.GetFunctionName(name);
+  }
+
+ private:
+  void RecordInliningInfo(CodeEntry* entry, AbstractCode* abstract_code);
+  void RecordDeoptInlinedFrames(CodeEntry* entry, AbstractCode* abstract_code);
+  Name* InferScriptName(Name* name, SharedFunctionInfo* info);
+  V8_INLINE void DispatchCodeEvent(const CodeEventsContainer& evt_rec) {
+    for (auto observer : observers_) {
+      observer->CodeEventHandler(evt_rec);
+    }
+  }
+
+  StringsStorage function_and_resource_names_;
+  std::vector<CodeEntry*> code_entries_;
+  std::vector<CodeEventObserver*> observers_;
+
+  DISALLOW_COPY_AND_ASSIGN(ProfilerListener);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_PROFILER_PROFILER_LISTENER_H_
diff --git a/src/profiler/sampler.cc b/src/profiler/sampler.cc
deleted file mode 100644
index ae47dca..0000000
--- a/src/profiler/sampler.cc
+++ /dev/null
@@ -1,828 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/profiler/sampler.h"
-
-#if V8_OS_POSIX && !V8_OS_CYGWIN
-
-#define USE_SIGNALS
-
-#include <errno.h>
-#include <pthread.h>
-#include <signal.h>
-#include <sys/time.h>
-
-#if !V8_OS_QNX && !V8_OS_NACL && !V8_OS_AIX
-#include <sys/syscall.h>  // NOLINT
-#endif
-
-#if V8_OS_MACOSX
-#include <mach/mach.h>
-// OpenBSD doesn't have <ucontext.h>. ucontext_t lives in <signal.h>
-// and is a typedef for struct sigcontext. There is no uc_mcontext.
-#elif(!V8_OS_ANDROID || defined(__BIONIC_HAVE_UCONTEXT_T)) && \
-    !V8_OS_OPENBSD && !V8_OS_NACL
-#include <ucontext.h>
-#endif
-
-#include <unistd.h>
-
-// GLibc on ARM defines mcontext_t has a typedef for 'struct sigcontext'.
-// Old versions of the C library <signal.h> didn't define the type.
-#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
-    (defined(__arm__) || defined(__aarch64__)) && \
-    !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
-#include <asm/sigcontext.h>  // NOLINT
-#endif
-
-#elif V8_OS_WIN || V8_OS_CYGWIN
-
-#include "src/base/win32-headers.h"
-
-#endif
-
-#include "src/base/atomic-utils.h"
-#include "src/base/platform/platform.h"
-#include "src/profiler/cpu-profiler-inl.h"
-#include "src/profiler/tick-sample.h"
-#include "src/simulator.h"
-#include "src/v8threads.h"
-
-
-#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
-
-// Not all versions of Android's C library provide ucontext_t.
-// Detect this and provide custom but compatible definitions. Note that these
-// follow the GLibc naming convention to access register values from
-// mcontext_t.
-//
-// See http://code.google.com/p/android/issues/detail?id=34784
-
-#if defined(__arm__)
-
-typedef struct sigcontext mcontext_t;
-
-typedef struct ucontext {
-  uint32_t uc_flags;
-  struct ucontext* uc_link;
-  stack_t uc_stack;
-  mcontext_t uc_mcontext;
-  // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-
-#elif defined(__aarch64__)
-
-typedef struct sigcontext mcontext_t;
-
-typedef struct ucontext {
-  uint64_t uc_flags;
-  struct ucontext *uc_link;
-  stack_t uc_stack;
-  mcontext_t uc_mcontext;
-  // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-
-#elif defined(__mips__)
-// MIPS version of sigcontext, for Android bionic.
-typedef struct {
-  uint32_t regmask;
-  uint32_t status;
-  uint64_t pc;
-  uint64_t gregs[32];
-  uint64_t fpregs[32];
-  uint32_t acx;
-  uint32_t fpc_csr;
-  uint32_t fpc_eir;
-  uint32_t used_math;
-  uint32_t dsp;
-  uint64_t mdhi;
-  uint64_t mdlo;
-  uint32_t hi1;
-  uint32_t lo1;
-  uint32_t hi2;
-  uint32_t lo2;
-  uint32_t hi3;
-  uint32_t lo3;
-} mcontext_t;
-
-typedef struct ucontext {
-  uint32_t uc_flags;
-  struct ucontext* uc_link;
-  stack_t uc_stack;
-  mcontext_t uc_mcontext;
-  // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-
-#elif defined(__i386__)
-// x86 version for Android.
-typedef struct {
-  uint32_t gregs[19];
-  void* fpregs;
-  uint32_t oldmask;
-  uint32_t cr2;
-} mcontext_t;
-
-typedef uint32_t kernel_sigset_t[2];  // x86 kernel uses 64-bit signal masks
-typedef struct ucontext {
-  uint32_t uc_flags;
-  struct ucontext* uc_link;
-  stack_t uc_stack;
-  mcontext_t uc_mcontext;
-  // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
-
-#elif defined(__x86_64__)
-// x64 version for Android.
-typedef struct {
-  uint64_t gregs[23];
-  void* fpregs;
-  uint64_t __reserved1[8];
-} mcontext_t;
-
-typedef struct ucontext {
-  uint64_t uc_flags;
-  struct ucontext *uc_link;
-  stack_t uc_stack;
-  mcontext_t uc_mcontext;
-  // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 };
-#endif
-
-#endif  // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
-
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-class PlatformDataCommon : public Malloced {
- public:
-  PlatformDataCommon() : profiled_thread_id_(ThreadId::Current()) {}
-  ThreadId profiled_thread_id() { return profiled_thread_id_; }
-
- protected:
-  ~PlatformDataCommon() {}
-
- private:
-  ThreadId profiled_thread_id_;
-};
-
-
-typedef List<Sampler*> SamplerList;
-
-#if defined(USE_SIGNALS)
-class AtomicGuard {
- public:
-  explicit AtomicGuard(base::AtomicValue<int>* atomic, bool is_block = true)
-      : atomic_(atomic),
-        is_success_(false) {
-    do {
-      // Use Acquire_Load to gain mutual exclusion.
-      USE(atomic_->Value());
-      is_success_ = atomic_->TrySetValue(0, 1);
-    } while (is_block && !is_success_);
-  }
-
-  bool is_success() { return is_success_; }
-
-  ~AtomicGuard() {
-    if (is_success_) {
-      atomic_->SetValue(0);
-    }
-    atomic_ = NULL;
-  }
-
- private:
-  base::AtomicValue<int>* atomic_;
-  bool is_success_;
-};
-
-
-// Returns key for hash map.
-void* ThreadKey(pthread_t thread_id) {
-  return reinterpret_cast<void*>(thread_id);
-}
-
-
-// Returns hash value for hash map.
-uint32_t ThreadHash(pthread_t thread_id) {
-#if V8_OS_MACOSX
-  return static_cast<uint32_t>(reinterpret_cast<intptr_t>(thread_id));
-#else
-  return static_cast<uint32_t>(thread_id);
-#endif
-}
-#endif  // USE_SIGNALS
-
-}  // namespace
-
-#if defined(USE_SIGNALS)
-
-class Sampler::PlatformData : public PlatformDataCommon {
- public:
-  PlatformData() : vm_tid_(pthread_self()) {}
-  pthread_t vm_tid() const { return vm_tid_; }
-
- private:
-  pthread_t vm_tid_;
-};
-
-#elif V8_OS_WIN || V8_OS_CYGWIN
-
-// ----------------------------------------------------------------------------
-// Win32 profiler support. On Cygwin we use the same sampler implementation as
-// on Win32.
-
-class Sampler::PlatformData : public PlatformDataCommon {
- public:
-  // Get a handle to the calling thread. This is the thread that we are
-  // going to profile. We need to make a copy of the handle because we are
-  // going to use it in the sampler thread. Using GetThreadHandle() will
-  // not work in this case. We're using OpenThread because DuplicateHandle
-  // for some reason doesn't work in Chrome's sandbox.
-  PlatformData()
-      : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
-                                    THREAD_SUSPEND_RESUME |
-                                    THREAD_QUERY_INFORMATION,
-                                    false,
-                                    GetCurrentThreadId())) {}
-
-  ~PlatformData() {
-    if (profiled_thread_ != NULL) {
-      CloseHandle(profiled_thread_);
-      profiled_thread_ = NULL;
-    }
-  }
-
-  HANDLE profiled_thread() { return profiled_thread_; }
-
- private:
-  HANDLE profiled_thread_;
-};
-#endif
-
-
-#if defined(USE_SIGNALS)
-
-class SignalHandler : public AllStatic {
- public:
-  static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
-  static void TearDown() { delete mutex_; mutex_ = NULL; }
-
-  static void IncreaseSamplerCount() {
-    base::LockGuard<base::Mutex> lock_guard(mutex_);
-    if (++client_count_ == 1) Install();
-  }
-
-  static void DecreaseSamplerCount() {
-    base::LockGuard<base::Mutex> lock_guard(mutex_);
-    if (--client_count_ == 0) Restore();
-  }
-
-  static bool Installed() {
-    return signal_handler_installed_;
-  }
-
-#if !V8_OS_NACL
-  static void CollectSample(void* context, Sampler* sampler);
-#endif
-
- private:
-  static void Install() {
-#if !V8_OS_NACL
-    struct sigaction sa;
-    sa.sa_sigaction = &HandleProfilerSignal;
-    sigemptyset(&sa.sa_mask);
-#if V8_OS_QNX
-    sa.sa_flags = SA_SIGINFO;
-#else
-    sa.sa_flags = SA_RESTART | SA_SIGINFO;
-#endif
-    signal_handler_installed_ =
-        (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
-#endif
-  }
-
-  static void Restore() {
-#if !V8_OS_NACL
-    if (signal_handler_installed_) {
-      sigaction(SIGPROF, &old_signal_handler_, 0);
-      signal_handler_installed_ = false;
-    }
-#endif
-  }
-
-#if !V8_OS_NACL
-  static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);
-#endif
-  // Protects the process wide state below.
-  static base::Mutex* mutex_;
-  static int client_count_;
-  static bool signal_handler_installed_;
-  static struct sigaction old_signal_handler_;
-};
-
-
-base::Mutex* SignalHandler::mutex_ = NULL;
-int SignalHandler::client_count_ = 0;
-struct sigaction SignalHandler::old_signal_handler_;
-bool SignalHandler::signal_handler_installed_ = false;
-
-
-// As Native Client does not support signal handling, profiling is disabled.
-#if !V8_OS_NACL
-void SignalHandler::CollectSample(void* context, Sampler* sampler) {
-  if (sampler == NULL || (!sampler->IsProfiling() &&
-                          !sampler->IsRegistered())) {
-    return;
-  }
-  Isolate* isolate = sampler->isolate();
-
-  // We require a fully initialized and entered isolate.
-  if (isolate == NULL || !isolate->IsInUse()) return;
-
-  if (v8::Locker::IsActive() &&
-      !isolate->thread_manager()->IsLockedByCurrentThread()) {
-    return;
-  }
-
-  v8::RegisterState state;
-
-#if defined(USE_SIMULATOR)
-  if (!SimulatorHelper::FillRegisters(isolate, &state)) return;
-#else
-  // Extracting the sample from the context is extremely machine dependent.
-  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
-#if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390)))
-  mcontext_t& mcontext = ucontext->uc_mcontext;
-#endif
-#if V8_OS_LINUX
-#if V8_HOST_ARCH_IA32
-  state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
-  state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
-  state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
-#elif V8_HOST_ARCH_X64
-  state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
-  state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
-  state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
-#elif V8_HOST_ARCH_ARM
-#if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
-  // Old GLibc ARM versions used a gregs[] array to access the register
-  // values from mcontext_t.
-  state.pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
-  state.sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
-  state.fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
-#else
-  state.pc = reinterpret_cast<Address>(mcontext.arm_pc);
-  state.sp = reinterpret_cast<Address>(mcontext.arm_sp);
-  state.fp = reinterpret_cast<Address>(mcontext.arm_fp);
-#endif  // V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
-#elif V8_HOST_ARCH_ARM64
-  state.pc = reinterpret_cast<Address>(mcontext.pc);
-  state.sp = reinterpret_cast<Address>(mcontext.sp);
-  // FP is an alias for x29.
-  state.fp = reinterpret_cast<Address>(mcontext.regs[29]);
-#elif V8_HOST_ARCH_MIPS
-  state.pc = reinterpret_cast<Address>(mcontext.pc);
-  state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
-  state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
-#elif V8_HOST_ARCH_MIPS64
-  state.pc = reinterpret_cast<Address>(mcontext.pc);
-  state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
-  state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
-#elif V8_HOST_ARCH_PPC
-  state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->nip);
-  state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
-  state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
-#elif V8_HOST_ARCH_S390
-#if V8_TARGET_ARCH_32_BIT
-  // 31-bit target will have bit 0 (MSB) of the PSW set to denote addressing
-  // mode.  This bit needs to be masked out to resolve actual address.
-  state.pc =
-      reinterpret_cast<Address>(ucontext->uc_mcontext.psw.addr & 0x7FFFFFFF);
-#else
-  state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.psw.addr);
-#endif  // V8_TARGET_ARCH_32_BIT
-  state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[15]);
-  state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[11]);
-#endif  // V8_HOST_ARCH_*
-#elif V8_OS_MACOSX
-#if V8_HOST_ARCH_X64
-#if __DARWIN_UNIX03
-  state.pc = reinterpret_cast<Address>(mcontext->__ss.__rip);
-  state.sp = reinterpret_cast<Address>(mcontext->__ss.__rsp);
-  state.fp = reinterpret_cast<Address>(mcontext->__ss.__rbp);
-#else  // !__DARWIN_UNIX03
-  state.pc = reinterpret_cast<Address>(mcontext->ss.rip);
-  state.sp = reinterpret_cast<Address>(mcontext->ss.rsp);
-  state.fp = reinterpret_cast<Address>(mcontext->ss.rbp);
-#endif  // __DARWIN_UNIX03
-#elif V8_HOST_ARCH_IA32
-#if __DARWIN_UNIX03
-  state.pc = reinterpret_cast<Address>(mcontext->__ss.__eip);
-  state.sp = reinterpret_cast<Address>(mcontext->__ss.__esp);
-  state.fp = reinterpret_cast<Address>(mcontext->__ss.__ebp);
-#else  // !__DARWIN_UNIX03
-  state.pc = reinterpret_cast<Address>(mcontext->ss.eip);
-  state.sp = reinterpret_cast<Address>(mcontext->ss.esp);
-  state.fp = reinterpret_cast<Address>(mcontext->ss.ebp);
-#endif  // __DARWIN_UNIX03
-#endif  // V8_HOST_ARCH_IA32
-#elif V8_OS_FREEBSD
-#if V8_HOST_ARCH_IA32
-  state.pc = reinterpret_cast<Address>(mcontext.mc_eip);
-  state.sp = reinterpret_cast<Address>(mcontext.mc_esp);
-  state.fp = reinterpret_cast<Address>(mcontext.mc_ebp);
-#elif V8_HOST_ARCH_X64
-  state.pc = reinterpret_cast<Address>(mcontext.mc_rip);
-  state.sp = reinterpret_cast<Address>(mcontext.mc_rsp);
-  state.fp = reinterpret_cast<Address>(mcontext.mc_rbp);
-#elif V8_HOST_ARCH_ARM
-  state.pc = reinterpret_cast<Address>(mcontext.mc_r15);
-  state.sp = reinterpret_cast<Address>(mcontext.mc_r13);
-  state.fp = reinterpret_cast<Address>(mcontext.mc_r11);
-#endif  // V8_HOST_ARCH_*
-#elif V8_OS_NETBSD
-#if V8_HOST_ARCH_IA32
-  state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
-  state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
-  state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
-#elif V8_HOST_ARCH_X64
-  state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
-  state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
-  state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
-#endif  // V8_HOST_ARCH_*
-#elif V8_OS_OPENBSD
-#if V8_HOST_ARCH_IA32
-  state.pc = reinterpret_cast<Address>(ucontext->sc_eip);
-  state.sp = reinterpret_cast<Address>(ucontext->sc_esp);
-  state.fp = reinterpret_cast<Address>(ucontext->sc_ebp);
-#elif V8_HOST_ARCH_X64
-  state.pc = reinterpret_cast<Address>(ucontext->sc_rip);
-  state.sp = reinterpret_cast<Address>(ucontext->sc_rsp);
-  state.fp = reinterpret_cast<Address>(ucontext->sc_rbp);
-#endif  // V8_HOST_ARCH_*
-#elif V8_OS_SOLARIS
-  state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
-  state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
-  state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
-#elif V8_OS_QNX
-#if V8_HOST_ARCH_IA32
-  state.pc = reinterpret_cast<Address>(mcontext.cpu.eip);
-  state.sp = reinterpret_cast<Address>(mcontext.cpu.esp);
-  state.fp = reinterpret_cast<Address>(mcontext.cpu.ebp);
-#elif V8_HOST_ARCH_ARM
-  state.pc = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_PC]);
-  state.sp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_SP]);
-  state.fp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_FP]);
-#endif  // V8_HOST_ARCH_*
-#elif V8_OS_AIX
-  state.pc = reinterpret_cast<Address>(mcontext.jmp_context.iar);
-  state.sp = reinterpret_cast<Address>(mcontext.jmp_context.gpr[1]);
-  state.fp = reinterpret_cast<Address>(mcontext.jmp_context.gpr[31]);
-#endif  // V8_OS_AIX
-#endif  // USE_SIMULATOR
-  sampler->SampleStack(state);
-}
-#endif  // V8_OS_NACL
-
-#endif  // USE_SIGNALS
-
-
-class SamplerThread : public base::Thread {
- public:
-  static const int kSamplerThreadStackSize = 64 * KB;
-
-  explicit SamplerThread(int interval)
-      : Thread(base::Thread::Options("SamplerThread", kSamplerThreadStackSize)),
-        interval_(interval) {}
-
-  static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
-  static void TearDown() { delete mutex_; mutex_ = NULL; }
-
-  static void AddActiveSampler(Sampler* sampler) {
-    bool need_to_start = false;
-    base::LockGuard<base::Mutex> lock_guard(mutex_);
-    if (instance_ == NULL) {
-      // Start a thread that will send SIGPROF signal to VM threads,
-      // when CPU profiling will be enabled.
-      instance_ = new SamplerThread(sampler->interval());
-      need_to_start = true;
-    }
-
-    DCHECK(sampler->IsActive());
-    DCHECK(instance_->interval_ == sampler->interval());
-
-#if defined(USE_SIGNALS)
-    AddSampler(sampler);
-#else
-    DCHECK(!instance_->active_samplers_.Contains(sampler));
-    instance_->active_samplers_.Add(sampler);
-#endif  // USE_SIGNALS
-
-    if (need_to_start) instance_->StartSynchronously();
-  }
-
-  static void RemoveSampler(Sampler* sampler) {
-    SamplerThread* instance_to_remove = NULL;
-    {
-      base::LockGuard<base::Mutex> lock_guard(mutex_);
-
-      DCHECK(sampler->IsActive() || sampler->IsRegistered());
-#if defined(USE_SIGNALS)
-      {
-        AtomicGuard atomic_guard(&sampler_list_access_counter_);
-        // Remove sampler from map.
-        pthread_t thread_id = sampler->platform_data()->vm_tid();
-        void* thread_key = ThreadKey(thread_id);
-        uint32_t thread_hash = ThreadHash(thread_id);
-        HashMap::Entry* entry =
-            thread_id_to_samplers_.Get().Lookup(thread_key, thread_hash);
-        DCHECK(entry != NULL);
-        SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
-        samplers->RemoveElement(sampler);
-        if (samplers->is_empty()) {
-          thread_id_to_samplers_.Pointer()->Remove(thread_key, thread_hash);
-          delete samplers;
-        }
-        if (thread_id_to_samplers_.Get().occupancy() == 0) {
-          instance_to_remove = instance_;
-          instance_ = NULL;
-        }
-      }
-#else
-      bool removed = instance_->active_samplers_.RemoveElement(sampler);
-      DCHECK(removed);
-      USE(removed);
-
-      // We cannot delete the instance immediately as we need to Join() the
-      // thread but we are holding mutex_ and the thread may try to acquire it.
-      if (instance_->active_samplers_.is_empty()) {
-        instance_to_remove = instance_;
-        instance_ = NULL;
-      }
-#endif  // USE_SIGNALS
-    }
-
-    if (!instance_to_remove) return;
-    instance_to_remove->Join();
-    delete instance_to_remove;
-  }
-
-  // Unlike AddActiveSampler, this method only adds a sampler,
-  // but won't start the sampler thread.
-  static void RegisterSampler(Sampler* sampler) {
-    base::LockGuard<base::Mutex> lock_guard(mutex_);
-#if defined(USE_SIGNALS)
-    AddSampler(sampler);
-#endif  // USE_SIGNALS
-  }
-
-  // Implement Thread::Run().
-  virtual void Run() {
-    while (true) {
-      {
-        base::LockGuard<base::Mutex> lock_guard(mutex_);
-#if defined(USE_SIGNALS)
-        if (thread_id_to_samplers_.Get().occupancy() == 0) break;
-        if (SignalHandler::Installed()) {
-          for (HashMap::Entry *p = thread_id_to_samplers_.Get().Start();
-               p != NULL; p = thread_id_to_samplers_.Get().Next(p)) {
-#if V8_OS_AIX && V8_TARGET_ARCH_PPC64
-            // on AIX64, cannot cast (void *) to pthread_t which is
-            // of type unsigned int (4bytes)
-            pthread_t thread_id = reinterpret_cast<intptr_t>(p->key);
-#else
-            pthread_t thread_id = reinterpret_cast<pthread_t>(p->key);
-#endif
-            pthread_kill(thread_id, SIGPROF);
-          }
-        }
-#else
-        if (active_samplers_.is_empty()) break;
-        // When CPU profiling is enabled both JavaScript and C++ code is
-        // profiled. We must not suspend.
-        for (int i = 0; i < active_samplers_.length(); ++i) {
-          Sampler* sampler = active_samplers_.at(i);
-          if (!sampler->IsProfiling()) continue;
-          sampler->DoSample();
-        }
-#endif  // USE_SIGNALS
-      }
-      base::OS::Sleep(base::TimeDelta::FromMilliseconds(interval_));
-    }
-  }
-
- private:
-  // Protects the process wide state below.
-  static base::Mutex* mutex_;
-  static SamplerThread* instance_;
-
-  const int interval_;
-
-#if defined(USE_SIGNALS)
-  struct HashMapCreateTrait {
-    static void Construct(HashMap* allocated_ptr) {
-      new (allocated_ptr) HashMap(HashMap::PointersMatch);
-    }
-  };
-  friend class SignalHandler;
-  static base::LazyInstance<HashMap, HashMapCreateTrait>::type
-      thread_id_to_samplers_;
-  static base::AtomicValue<int> sampler_list_access_counter_;
-  static void AddSampler(Sampler* sampler) {
-    AtomicGuard atomic_guard(&sampler_list_access_counter_);
-    // Add sampler into map if needed.
-    pthread_t thread_id = sampler->platform_data()->vm_tid();
-    HashMap::Entry *entry =
-        thread_id_to_samplers_.Pointer()->LookupOrInsert(ThreadKey(thread_id),
-                                                         ThreadHash(thread_id));
-    if (entry->value == NULL) {
-      SamplerList* samplers = new SamplerList();
-      samplers->Add(sampler);
-      entry->value = samplers;
-    } else {
-      SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
-      if (!samplers->Contains(sampler)) {
-        samplers->Add(sampler);
-      }
-    }
-  }
-#else
-  SamplerList active_samplers_;
-#endif  // USE_SIGNALS
-
-  DISALLOW_COPY_AND_ASSIGN(SamplerThread);
-};
-
-
-base::Mutex* SamplerThread::mutex_ = NULL;
-SamplerThread* SamplerThread::instance_ = NULL;
-#if defined(USE_SIGNALS)
-base::LazyInstance<HashMap, SamplerThread::HashMapCreateTrait>::type
-    SamplerThread::thread_id_to_samplers_ = LAZY_INSTANCE_INITIALIZER;
-base::AtomicValue<int> SamplerThread::sampler_list_access_counter_(0);
-
-// As Native Client does not support signal handling, profiling is disabled.
-#if !V8_OS_NACL
-void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
-                                         void* context) {
-  USE(info);
-  if (signal != SIGPROF) return;
-  AtomicGuard atomic_guard(&SamplerThread::sampler_list_access_counter_, false);
-  if (!atomic_guard.is_success()) return;
-  pthread_t thread_id = pthread_self();
-  HashMap::Entry* entry =
-      SamplerThread::thread_id_to_samplers_.Pointer()->Lookup(
-          ThreadKey(thread_id), ThreadHash(thread_id));
-  if (entry == NULL)
-    return;
-  SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
-  for (int i = 0; i < samplers->length(); ++i) {
-    Sampler* sampler = samplers->at(i);
-    CollectSample(context, sampler);
-  }
-}
-#endif  // !V8_OS_NACL
-#endif  // USE_SIGNALs
-
-
-void Sampler::SetUp() {
-#if defined(USE_SIGNALS)
-  SignalHandler::SetUp();
-#endif
-  SamplerThread::SetUp();
-}
-
-
-void Sampler::TearDown() {
-  SamplerThread::TearDown();
-#if defined(USE_SIGNALS)
-  SignalHandler::TearDown();
-#endif
-}
-
-Sampler::Sampler(Isolate* isolate, int interval)
-    : isolate_(isolate),
-      interval_(interval),
-      profiling_(false),
-      has_processing_thread_(false),
-      active_(false),
-      registered_(false),
-      is_counting_samples_(false),
-      js_sample_count_(0),
-      external_sample_count_(0) {
-  data_ = new PlatformData;
-}
-
-Sampler::~Sampler() {
-  DCHECK(!IsActive());
-  if (IsRegistered()) {
-    SamplerThread::RemoveSampler(this);
-  }
-  delete data_;
-}
-
-void Sampler::Start() {
-  DCHECK(!IsActive());
-  SetActive(true);
-  SamplerThread::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
-  DCHECK(IsActive());
-  SamplerThread::RemoveSampler(this);
-  SetActive(false);
-  SetRegistered(false);
-}
-
-
-void Sampler::IncreaseProfilingDepth() {
-  base::NoBarrier_AtomicIncrement(&profiling_, 1);
-#if defined(USE_SIGNALS)
-  SignalHandler::IncreaseSamplerCount();
-#endif
-}
-
-
-void Sampler::DecreaseProfilingDepth() {
-#if defined(USE_SIGNALS)
-  SignalHandler::DecreaseSamplerCount();
-#endif
-  base::NoBarrier_AtomicIncrement(&profiling_, -1);
-}
-
-
-void Sampler::SampleStack(const v8::RegisterState& state) {
-  TickSample* sample = isolate_->cpu_profiler()->StartTickSample();
-  TickSample sample_obj;
-  if (sample == NULL) sample = &sample_obj;
-  sample->Init(isolate_, state, TickSample::kIncludeCEntryFrame, true);
-  if (is_counting_samples_ && !sample->timestamp.IsNull()) {
-    if (sample->state == JS) ++js_sample_count_;
-    if (sample->state == EXTERNAL) ++external_sample_count_;
-  }
-  Tick(sample);
-  if (sample != &sample_obj) {
-    isolate_->cpu_profiler()->FinishTickSample();
-  }
-}
-
-
-#if defined(USE_SIGNALS)
-
-void Sampler::DoSample() {
-  if (!SignalHandler::Installed()) return;
-  if (!IsActive() && !IsRegistered()) {
-    SamplerThread::RegisterSampler(this);
-    SetRegistered(true);
-  }
-  pthread_kill(platform_data()->vm_tid(), SIGPROF);
-}
-
-#elif V8_OS_WIN || V8_OS_CYGWIN
-
-void Sampler::DoSample() {
-  HANDLE profiled_thread = platform_data()->profiled_thread();
-  if (profiled_thread == NULL) return;
-
-  const DWORD kSuspendFailed = static_cast<DWORD>(-1);
-  if (SuspendThread(profiled_thread) == kSuspendFailed) return;
-
-  // Context used for sampling the register state of the profiled thread.
-  CONTEXT context;
-  memset(&context, 0, sizeof(context));
-  context.ContextFlags = CONTEXT_FULL;
-  if (GetThreadContext(profiled_thread, &context) != 0) {
-    v8::RegisterState state;
-#if defined(USE_SIMULATOR)
-    if (!SimulatorHelper::FillRegisters(isolate(), &state)) {
-      ResumeThread(profiled_thread);
-      return;
-    }
-#else
-#if V8_HOST_ARCH_X64
-    state.pc = reinterpret_cast<Address>(context.Rip);
-    state.sp = reinterpret_cast<Address>(context.Rsp);
-    state.fp = reinterpret_cast<Address>(context.Rbp);
-#else
-    state.pc = reinterpret_cast<Address>(context.Eip);
-    state.sp = reinterpret_cast<Address>(context.Esp);
-    state.fp = reinterpret_cast<Address>(context.Ebp);
-#endif
-#endif  // USE_SIMULATOR
-    SampleStack(state);
-  }
-  ResumeThread(profiled_thread);
-}
-
-#endif  // USE_SIGNALS
-
-
-}  // namespace internal
-}  // namespace v8
diff --git a/src/profiler/sampling-heap-profiler.cc b/src/profiler/sampling-heap-profiler.cc
index db9214d..b4361ee 100644
--- a/src/profiler/sampling-heap-profiler.cc
+++ b/src/profiler/sampling-heap-profiler.cc
@@ -7,6 +7,7 @@
 #include <stdint.h>
 #include <memory>
 #include "src/api.h"
+#include "src/base/ieee754.h"
 #include "src/base/utils/random-number-generator.h"
 #include "src/frames-inl.h"
 #include "src/heap/heap.h"
@@ -27,7 +28,7 @@
     return static_cast<intptr_t>(rate);
   }
   double u = random_->NextDouble();
-  double next = (-std::log(u)) * rate;
+  double next = (-base::ieee754::log(u)) * rate;
   return next < kPointerSize
              ? kPointerSize
              : (next > INT_MAX ? INT_MAX : static_cast<intptr_t>(next));
diff --git a/src/profiler/strings-storage.cc b/src/profiler/strings-storage.cc
index 9f095b8..634b6ee 100644
--- a/src/profiler/strings-storage.cc
+++ b/src/profiler/strings-storage.cc
@@ -22,7 +22,8 @@
 
 
 StringsStorage::~StringsStorage() {
-  for (HashMap::Entry* p = names_.Start(); p != NULL; p = names_.Next(p)) {
+  for (base::HashMap::Entry* p = names_.Start(); p != NULL;
+       p = names_.Next(p)) {
     DeleteArray(reinterpret_cast<const char*>(p->value));
   }
 }
@@ -30,7 +31,7 @@
 
 const char* StringsStorage::GetCopy(const char* src) {
   int len = static_cast<int>(strlen(src));
-  HashMap::Entry* entry = GetEntry(src, len);
+  base::HashMap::Entry* entry = GetEntry(src, len);
   if (entry->value == NULL) {
     Vector<char> dst = Vector<char>::New(len + 1);
     StrNCpy(dst, src, len);
@@ -52,7 +53,7 @@
 
 
 const char* StringsStorage::AddOrDisposeString(char* str, int len) {
-  HashMap::Entry* entry = GetEntry(str, len);
+  base::HashMap::Entry* entry = GetEntry(str, len);
   if (entry->value == NULL) {
     // New entry added.
     entry->key = str;
@@ -107,15 +108,15 @@
 
 size_t StringsStorage::GetUsedMemorySize() const {
   size_t size = sizeof(*this);
-  size += sizeof(HashMap::Entry) * names_.capacity();
-  for (HashMap::Entry* p = names_.Start(); p != NULL; p = names_.Next(p)) {
+  size += sizeof(base::HashMap::Entry) * names_.capacity();
+  for (base::HashMap::Entry* p = names_.Start(); p != NULL;
+       p = names_.Next(p)) {
     size += strlen(reinterpret_cast<const char*>(p->value)) + 1;
   }
   return size;
 }
 
-
-HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
+base::HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
   uint32_t hash = StringHasher::HashSequentialString(str, len, hash_seed_);
   return names_.LookupOrInsert(const_cast<char*>(str), hash);
 }
diff --git a/src/profiler/strings-storage.h b/src/profiler/strings-storage.h
index 0849d63..f98aa5e 100644
--- a/src/profiler/strings-storage.h
+++ b/src/profiler/strings-storage.h
@@ -5,9 +5,11 @@
 #ifndef V8_PROFILER_STRINGS_STORAGE_H_
 #define V8_PROFILER_STRINGS_STORAGE_H_
 
+#include <stdarg.h>
+
 #include "src/allocation.h"
 #include "src/base/compiler-specific.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
 
 namespace v8 {
 namespace internal {
@@ -34,10 +36,10 @@
 
   static bool StringsMatch(void* key1, void* key2);
   const char* AddOrDisposeString(char* str, int len);
-  HashMap::Entry* GetEntry(const char* str, int len);
+  base::HashMap::Entry* GetEntry(const char* str, int len);
 
   uint32_t hash_seed_;
-  HashMap names_;
+  base::HashMap names_;
 
   DISALLOW_COPY_AND_ASSIGN(StringsStorage);
 };
diff --git a/src/profiler/tick-sample.cc b/src/profiler/tick-sample.cc
index 3edd964..4b48132 100644
--- a/src/profiler/tick-sample.cc
+++ b/src/profiler/tick-sample.cc
@@ -20,7 +20,6 @@
          (reinterpret_cast<uintptr_t>(ptr2) & mask);
 }
 
-
 // Check if the code at specified address could potentially be a
 // frame setup code.
 bool IsNoFrameRegion(Address address) {
@@ -77,7 +76,6 @@
 
 }  // namespace
 
-
 //
 // StackTracer implementation
 //
@@ -86,21 +84,52 @@
                                    RecordCEntryFrame record_c_entry_frame,
                                    bool update_stats) {
   timestamp = base::TimeTicks::HighResolutionNow();
-  pc = reinterpret_cast<Address>(regs.pc);
-  state = isolate->current_vm_state();
   this->update_stats = update_stats;
 
-  // Avoid collecting traces while doing GC.
-  if (state == GC) return;
+  SampleInfo info;
+  if (GetStackSample(isolate, regs, record_c_entry_frame,
+                     reinterpret_cast<void**>(&stack[0]), kMaxFramesCount,
+                     &info)) {
+    state = info.vm_state;
+    pc = static_cast<Address>(regs.pc);
+    frames_count = static_cast<unsigned>(info.frames_count);
+    has_external_callback = info.external_callback_entry != nullptr;
+    if (has_external_callback) {
+      external_callback_entry =
+          static_cast<Address>(info.external_callback_entry);
+    } else if (frames_count) {
+      // sp register may point at an arbitrary place in memory, make
+      // sure MSAN doesn't complain about it.
+      MSAN_MEMORY_IS_INITIALIZED(regs.sp, sizeof(Address));
+      // Sample potential return address value for frameless invocation of
+      // stubs (we'll figure out later, if this value makes sense).
+      tos = Memory::Address_at(reinterpret_cast<Address>(regs.sp));
+    } else {
+      tos = nullptr;
+    }
+  } else {
+    // It is executing JS but failed to collect a stack trace.
+    // Mark the sample as spoiled.
+    timestamp = base::TimeTicks();
+    pc = nullptr;
+  }
+}
+
+bool TickSample::GetStackSample(Isolate* isolate, const v8::RegisterState& regs,
+                                RecordCEntryFrame record_c_entry_frame,
+                                void** frames, size_t frames_limit,
+                                v8::SampleInfo* sample_info) {
+  sample_info->frames_count = 0;
+  sample_info->vm_state = isolate->current_vm_state();
+  sample_info->external_callback_entry = nullptr;
+  if (sample_info->vm_state == GC) return true;
 
   Address js_entry_sp = isolate->js_entry_sp();
-  if (js_entry_sp == 0) return;  // Not executing JS now.
+  if (js_entry_sp == 0) return true;  // Not executing JS now.
 
-  if (pc && IsNoFrameRegion(pc)) {
-    // Can't collect stack. Mark the sample as spoiled.
-    timestamp = base::TimeTicks();
-    pc = 0;
-    return;
+  if (regs.pc && IsNoFrameRegion(static_cast<Address>(regs.pc))) {
+    // Can't collect stack.
+    return false;
   }
 
   ExternalCallbackScope* scope = isolate->external_callback_scope();
@@ -109,48 +138,12 @@
   // we have already entrered JavaScript again and the external callback
   // is not the top function.
   if (scope && scope->scope_address() < handler) {
-    external_callback_entry = *scope->callback_entrypoint_address();
-    has_external_callback = true;
-  } else {
-    // sp register may point at an arbitrary place in memory, make
-    // sure MSAN doesn't complain about it.
-    MSAN_MEMORY_IS_INITIALIZED(regs.sp, sizeof(Address));
-    // Sample potential return address value for frameless invocation of
-    // stubs (we'll figure out later, if this value makes sense).
-    tos = Memory::Address_at(reinterpret_cast<Address>(regs.sp));
-    has_external_callback = false;
+    sample_info->external_callback_entry =
+        *scope->callback_entrypoint_address();
   }
 
   SafeStackFrameIterator it(isolate, reinterpret_cast<Address>(regs.fp),
                             reinterpret_cast<Address>(regs.sp), js_entry_sp);
-  top_frame_type = it.top_frame_type();
-
-  SampleInfo info;
-  GetStackSample(isolate, regs, record_c_entry_frame,
-                 reinterpret_cast<void**>(&stack[0]), kMaxFramesCount, &info);
-  frames_count = static_cast<unsigned>(info.frames_count);
-  if (!frames_count) {
-    // It is executing JS but failed to collect a stack trace.
-    // Mark the sample as spoiled.
-    timestamp = base::TimeTicks();
-    pc = 0;
-  }
-}
-
-
-void TickSample::GetStackSample(Isolate* isolate, const v8::RegisterState& regs,
-                                RecordCEntryFrame record_c_entry_frame,
-                                void** frames, size_t frames_limit,
-                                v8::SampleInfo* sample_info) {
-  sample_info->frames_count = 0;
-  sample_info->vm_state = isolate->current_vm_state();
-  if (sample_info->vm_state == GC) return;
-
-  Address js_entry_sp = isolate->js_entry_sp();
-  if (js_entry_sp == 0) return;  // Not executing JS now.
-
-  SafeStackFrameIterator it(isolate, reinterpret_cast<Address>(regs.fp),
-                            reinterpret_cast<Address>(regs.sp), js_entry_sp);
   size_t i = 0;
   if (record_c_entry_frame == kIncludeCEntryFrame && !it.done() &&
       it.top_frame_type() == StackFrame::EXIT) {
@@ -172,9 +165,9 @@
     it.Advance();
   }
   sample_info->frames_count = i;
+  return true;
 }
 
-
 #if defined(USE_SIMULATOR)
 bool SimulatorHelper::FillRegisters(Isolate* isolate,
                                     v8::RegisterState* state) {
diff --git a/src/profiler/tick-sample.h b/src/profiler/tick-sample.h
index fa2cf21..0a651af 100644
--- a/src/profiler/tick-sample.h
+++ b/src/profiler/tick-sample.h
@@ -36,11 +36,10 @@
         external_callback_entry(NULL),
         frames_count(0),
         has_external_callback(false),
-        update_stats(true),
-        top_frame_type(StackFrame::NONE) {}
+        update_stats(true) {}
   void Init(Isolate* isolate, const v8::RegisterState& state,
             RecordCEntryFrame record_c_entry_frame, bool update_stats);
-  static void GetStackSample(Isolate* isolate, const v8::RegisterState& state,
+  static bool GetStackSample(Isolate* isolate, const v8::RegisterState& state,
                              RecordCEntryFrame record_c_entry_frame,
                              void** frames, size_t frames_limit,
                              v8::SampleInfo* sample_info);
@@ -57,7 +56,6 @@
   unsigned frames_count : kMaxFramesCountLog2;  // Number of captured frames.
   bool has_external_callback : 1;
   bool update_stats : 1;  // Whether the sample should update aggregated stats.
-  StackFrame::Type top_frame_type : 5;
 };
 
 
diff --git a/src/property-descriptor.cc b/src/property-descriptor.cc
index 31efb41..f22a263 100644
--- a/src/property-descriptor.cc
+++ b/src/property-descriptor.cc
@@ -249,7 +249,7 @@
   if (!getter.is_null()) {
     // 18c. If IsCallable(getter) is false and getter is not undefined,
     // throw a TypeError exception.
-    if (!getter->IsCallable() && !getter->IsUndefined()) {
+    if (!getter->IsCallable() && !getter->IsUndefined(isolate)) {
       isolate->Throw(*isolate->factory()->NewTypeError(
           MessageTemplate::kObjectGetterCallable, getter));
       return false;
@@ -267,7 +267,7 @@
   if (!setter.is_null()) {
     // 21c. If IsCallable(setter) is false and setter is not undefined,
     // throw a TypeError exception.
-    if (!setter->IsCallable() && !setter->IsUndefined()) {
+    if (!setter->IsCallable() && !setter->IsUndefined(isolate)) {
       isolate->Throw(*isolate->factory()->NewTypeError(
           MessageTemplate::kObjectSetterCallable, setter));
       return false;
diff --git a/src/property-details.h b/src/property-details.h
index 8df7307..e30d668 100644
--- a/src/property-details.h
+++ b/src/property-details.h
@@ -28,11 +28,6 @@
   // ABSENT can never be stored in or returned from a descriptor's attributes
   // bitfield.  It is only used as a return value meaning the attributes of
   // a non-existent property.
-
-  // When creating a property, EVAL_DECLARED used to indicate that the property
-  // came from a sloppy-mode direct eval, and certain checks need to be done.
-  // Cannot be stored in or returned from a descriptor's attributes bitfield.
-  EVAL_DECLARED = 128
 };
 
 
diff --git a/src/prototype.h b/src/prototype.h
index e09ff0f..032d9b6 100644
--- a/src/prototype.h
+++ b/src/prototype.h
@@ -25,14 +25,12 @@
 
 class PrototypeIterator {
  public:
-  enum WhereToStart { START_AT_RECEIVER, START_AT_PROTOTYPE };
-
   enum WhereToEnd { END_AT_NULL, END_AT_NON_HIDDEN };
 
   const int kProxyPrototypeLimit = 100 * 1000;
 
   PrototypeIterator(Isolate* isolate, Handle<JSReceiver> receiver,
-                    WhereToStart where_to_start = START_AT_PROTOTYPE,
+                    WhereToStart where_to_start = kStartAtPrototype,
                     WhereToEnd where_to_end = END_AT_NULL)
       : object_(NULL),
         handle_(receiver),
@@ -41,32 +39,34 @@
         is_at_end_(false),
         seen_proxies_(0) {
     CHECK(!handle_.is_null());
-    if (where_to_start == START_AT_PROTOTYPE) Advance();
+    if (where_to_start == kStartAtPrototype) Advance();
   }
 
   PrototypeIterator(Isolate* isolate, JSReceiver* receiver,
-                    WhereToStart where_to_start = START_AT_PROTOTYPE,
+                    WhereToStart where_to_start = kStartAtPrototype,
                     WhereToEnd where_to_end = END_AT_NULL)
       : object_(receiver),
         isolate_(isolate),
         where_to_end_(where_to_end),
         is_at_end_(false),
         seen_proxies_(0) {
-    if (where_to_start == START_AT_PROTOTYPE) Advance();
+    if (where_to_start == kStartAtPrototype) Advance();
   }
 
   explicit PrototypeIterator(Map* receiver_map)
       : object_(receiver_map->prototype()),
         isolate_(receiver_map->GetIsolate()),
         where_to_end_(END_AT_NULL),
-        is_at_end_(object_->IsNull()) {}
+        is_at_end_(object_->IsNull(isolate_)),
+        seen_proxies_(0) {}
 
   explicit PrototypeIterator(Handle<Map> receiver_map)
       : object_(NULL),
         handle_(handle(receiver_map->prototype(), receiver_map->GetIsolate())),
         isolate_(receiver_map->GetIsolate()),
         where_to_end_(END_AT_NULL),
-        is_at_end_(handle_->IsNull()) {}
+        is_at_end_(handle_->IsNull(isolate_)),
+        seen_proxies_(0) {}
 
   ~PrototypeIterator() {}
 
@@ -114,7 +114,7 @@
     Object* prototype = map->prototype();
     is_at_end_ = where_to_end_ == END_AT_NON_HIDDEN
                      ? !map->has_hidden_prototype()
-                     : prototype->IsNull();
+                     : prototype->IsNull(isolate_);
 
     if (handle_.is_null()) {
       object_ = prototype;
@@ -153,7 +153,8 @@
     MaybeHandle<Object> proto =
         JSProxy::GetPrototype(Handle<JSProxy>::cast(handle_));
     if (!proto.ToHandle(&handle_)) return false;
-    is_at_end_ = where_to_end_ == END_AT_NON_HIDDEN || handle_->IsNull();
+    is_at_end_ =
+        where_to_end_ == END_AT_NON_HIDDEN || handle_->IsNull(isolate_);
     return true;
   }
 
diff --git a/src/regexp/arm/regexp-macro-assembler-arm.cc b/src/regexp/arm/regexp-macro-assembler-arm.cc
index f8dfc97..bf762b5 100644
--- a/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -9,7 +9,6 @@
 #include "src/code-stubs.h"
 #include "src/log.h"
 #include "src/macro-assembler.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/regexp/regexp-macro-assembler.h"
 #include "src/regexp/regexp-stack.h"
 #include "src/unicode.h"
diff --git a/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index e8bdad8..96d0c25 100644
--- a/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -9,7 +9,6 @@
 #include "src/code-stubs.h"
 #include "src/log.h"
 #include "src/macro-assembler.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/regexp/regexp-macro-assembler.h"
 #include "src/regexp/regexp-stack.h"
 #include "src/unicode.h"
diff --git a/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 9c55af6..6b4ea24 100644
--- a/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -8,7 +8,6 @@
 
 #include "src/log.h"
 #include "src/macro-assembler.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/regexp/regexp-macro-assembler.h"
 #include "src/regexp/regexp-stack.h"
 #include "src/unicode.h"
diff --git a/src/regexp/jsregexp.cc b/src/regexp/jsregexp.cc
index 6c50f4e..c3b670b 100644
--- a/src/regexp/jsregexp.cc
+++ b/src/regexp/jsregexp.cc
@@ -397,6 +397,7 @@
 
   Handle<FixedArray> data = Handle<FixedArray>(FixedArray::cast(re->data()));
   data->set(JSRegExp::code_index(is_one_byte), result.code);
+  SetIrregexpCaptureNameMap(*data, compile_data.capture_name_map);
   int register_max = IrregexpMaxRegisterCount(*data);
   if (result.num_registers > register_max) {
     SetIrregexpMaxRegisterCount(*data, result.num_registers);
@@ -416,6 +417,14 @@
   re->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(value));
 }
 
+void RegExpImpl::SetIrregexpCaptureNameMap(FixedArray* re,
+                                           Handle<FixedArray> value) {
+  if (value.is_null()) {
+    re->set(JSRegExp::kIrregexpCaptureNameMapIndex, Smi::FromInt(0));
+  } else {
+    re->set(JSRegExp::kIrregexpCaptureNameMapIndex, *value);
+  }
+}
 
 int RegExpImpl::IrregexpNumberOfCaptures(FixedArray* re) {
   return Smi::cast(re->get(JSRegExp::kIrregexpCaptureCountIndex))->value();
diff --git a/src/regexp/jsregexp.h b/src/regexp/jsregexp.h
index e55d650..dc8aee1 100644
--- a/src/regexp/jsregexp.h
+++ b/src/regexp/jsregexp.h
@@ -196,6 +196,8 @@
   // For acting on the JSRegExp data FixedArray.
   static int IrregexpMaxRegisterCount(FixedArray* re);
   static void SetIrregexpMaxRegisterCount(FixedArray* re, int value);
+  static void SetIrregexpCaptureNameMap(FixedArray* re,
+                                        Handle<FixedArray> value);
   static int IrregexpNumberOfCaptures(FixedArray* re);
   static int IrregexpNumberOfRegisters(FixedArray* re);
   static ByteArray* IrregexpByteCode(FixedArray* re, bool is_one_byte);
@@ -1530,6 +1532,7 @@
   RegExpNode* node;
   bool simple;
   bool contains_anchor;
+  Handle<FixedArray> capture_name_map;
   Handle<String> error;
   int capture_count;
 };
diff --git a/src/regexp/ppc/OWNERS b/src/regexp/ppc/OWNERS
index eb007cb..752e8e3 100644
--- a/src/regexp/ppc/OWNERS
+++ b/src/regexp/ppc/OWNERS
@@ -3,3 +3,4 @@
 joransiu@ca.ibm.com
 mbrandy@us.ibm.com
 michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index 70842f5..a7418dd 100644
--- a/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -10,7 +10,6 @@
 #include "src/code-stubs.h"
 #include "src/log.h"
 #include "src/macro-assembler.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/regexp/regexp-macro-assembler.h"
 #include "src/regexp/regexp-stack.h"
 #include "src/unicode.h"
diff --git a/src/regexp/regexp-ast.h b/src/regexp/regexp-ast.h
index 39c9cee..406bf84 100644
--- a/src/regexp/regexp-ast.h
+++ b/src/regexp/regexp-ast.h
@@ -7,6 +7,7 @@
 
 #include "src/objects.h"
 #include "src/utils.h"
+#include "src/zone-containers.h"
 #include "src/zone.h"
 
 namespace v8 {
@@ -412,7 +413,8 @@
 
 class RegExpCapture final : public RegExpTree {
  public:
-  explicit RegExpCapture(int index) : body_(NULL), index_(index) {}
+  explicit RegExpCapture(int index)
+      : body_(NULL), index_(index), name_(nullptr) {}
   void* Accept(RegExpVisitor* visitor, void* data) override;
   RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
   static RegExpNode* ToNode(RegExpTree* body, int index,
@@ -427,12 +429,15 @@
   RegExpTree* body() { return body_; }
   void set_body(RegExpTree* body) { body_ = body; }
   int index() { return index_; }
+  const ZoneVector<uc16>* name() const { return name_; }
+  void set_name(const ZoneVector<uc16>* name) { name_ = name; }
   static int StartRegister(int index) { return index * 2; }
   static int EndRegister(int index) { return index * 2 + 1; }
 
  private:
   RegExpTree* body_;
   int index_;
+  const ZoneVector<uc16>* name_;
 };
 
 
@@ -489,7 +494,9 @@
 
 class RegExpBackReference final : public RegExpTree {
  public:
-  explicit RegExpBackReference(RegExpCapture* capture) : capture_(capture) {}
+  RegExpBackReference() : capture_(nullptr), name_(nullptr) {}
+  explicit RegExpBackReference(RegExpCapture* capture)
+      : capture_(capture), name_(nullptr) {}
   void* Accept(RegExpVisitor* visitor, void* data) override;
   RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
   RegExpBackReference* AsBackReference() override;
@@ -500,9 +507,13 @@
   int max_match() override { return kInfinity; }
   int index() { return capture_->index(); }
   RegExpCapture* capture() { return capture_; }
+  void set_capture(RegExpCapture* capture) { capture_ = capture; }
+  const ZoneVector<uc16>* name() const { return name_; }
+  void set_name(const ZoneVector<uc16>* name) { name_ = name; }
 
  private:
   RegExpCapture* capture_;
+  const ZoneVector<uc16>* name_;
 };
 
 
diff --git a/src/regexp/regexp-macro-assembler.cc b/src/regexp/regexp-macro-assembler.cc
index 7fed26e..19ecaed 100644
--- a/src/regexp/regexp-macro-assembler.cc
+++ b/src/regexp/regexp-macro-assembler.cc
@@ -177,7 +177,7 @@
     return_value = RETRY;
   } else {
     Object* result = isolate->stack_guard()->HandleInterrupts();
-    if (result->IsException()) return_value = EXCEPTION;
+    if (result->IsException(isolate)) return_value = EXCEPTION;
   }
 
   DisallowHeapAllocation no_gc;
diff --git a/src/regexp/regexp-parser.cc b/src/regexp/regexp-parser.cc
index abb644a..dba81ae 100644
--- a/src/regexp/regexp-parser.cc
+++ b/src/regexp/regexp-parser.cc
@@ -25,6 +25,8 @@
       zone_(zone),
       error_(error),
       captures_(NULL),
+      named_captures_(NULL),
+      named_back_references_(NULL),
       in_(in),
       current_(kEndMarker),
       ignore_case_(flags & JSRegExp::kIgnoreCase),
@@ -73,7 +75,8 @@
   if (has_next()) {
     StackLimitCheck check(isolate());
     if (check.HasOverflowed()) {
-      ReportError(CStrVector(Isolate::kStackOverflowMessage));
+      ReportError(CStrVector(
+          MessageTemplate::TemplateString(MessageTemplate::kStackOverflow)));
     } else if (zone()->excess_allocation()) {
       ReportError(CStrVector("Regular expression too large"));
     } else {
@@ -149,6 +152,7 @@
 //   Disjunction
 RegExpTree* RegExpParser::ParsePattern() {
   RegExpTree* result = ParseDisjunction(CHECK_FAILED);
+  PatchNamedBackReferences(CHECK_FAILED);
   DCHECK(!has_more());
   // If the result of parsing is a literal string atom, and it has the
   // same length as the input, then the atom is identical to the input.
@@ -172,7 +176,7 @@
 RegExpTree* RegExpParser::ParseDisjunction() {
   // Used to store current state while parsing subexpressions.
   RegExpParserState initial_state(NULL, INITIAL, RegExpLookaround::LOOKAHEAD, 0,
-                                  ignore_case(), unicode(), zone());
+                                  nullptr, ignore_case(), unicode(), zone());
   RegExpParserState* state = &initial_state;
   // Cache the builder in a local variable for quick access.
   RegExpBuilder* builder = initial_state.builder();
@@ -204,6 +208,10 @@
 
         // Build result of subexpression.
         if (group_type == CAPTURE) {
+          if (state->IsNamedCapture()) {
+            CreateNamedCaptureAtIndex(state->capture_name(),
+                                      capture_index CHECK_FAILED);
+          }
           RegExpCapture* capture = GetCapture(capture_index);
           capture->set_body(body);
           body = capture;
@@ -268,47 +276,65 @@
       case '(': {
         SubexpressionType subexpr_type = CAPTURE;
         RegExpLookaround::Type lookaround_type = state->lookaround_type();
+        bool is_named_capture = false;
         Advance();
         if (current() == '?') {
           switch (Next()) {
             case ':':
               subexpr_type = GROUPING;
+              Advance(2);
               break;
             case '=':
               lookaround_type = RegExpLookaround::LOOKAHEAD;
               subexpr_type = POSITIVE_LOOKAROUND;
+              Advance(2);
               break;
             case '!':
               lookaround_type = RegExpLookaround::LOOKAHEAD;
               subexpr_type = NEGATIVE_LOOKAROUND;
+              Advance(2);
               break;
             case '<':
+              Advance();
               if (FLAG_harmony_regexp_lookbehind) {
-                Advance();
-                lookaround_type = RegExpLookaround::LOOKBEHIND;
                 if (Next() == '=') {
                   subexpr_type = POSITIVE_LOOKAROUND;
+                  lookaround_type = RegExpLookaround::LOOKBEHIND;
+                  Advance(2);
                   break;
                 } else if (Next() == '!') {
                   subexpr_type = NEGATIVE_LOOKAROUND;
+                  lookaround_type = RegExpLookaround::LOOKBEHIND;
+                  Advance(2);
                   break;
                 }
               }
+              if (FLAG_harmony_regexp_named_captures && unicode()) {
+                is_named_capture = true;
+                Advance();
+                break;
+              }
             // Fall through.
             default:
               return ReportError(CStrVector("Invalid group"));
           }
-          Advance(2);
-        } else {
+        }
+
+        const ZoneVector<uc16>* capture_name = nullptr;
+        if (subexpr_type == CAPTURE) {
           if (captures_started_ >= kMaxCaptures) {
             return ReportError(CStrVector("Too many captures"));
           }
           captures_started_++;
+
+          if (is_named_capture) {
+            capture_name = ParseCaptureGroupName(CHECK_FAILED);
+          }
         }
         // Store current state and begin new disjunction parsing.
         state = new (zone()) RegExpParserState(
             state, subexpr_type, lookaround_type, captures_started_,
-            ignore_case(), unicode(), zone());
+            capture_name, ignore_case(), unicode(), zone());
         builder = state->builder();
         continue;
       }
@@ -362,11 +388,11 @@
               if (FLAG_harmony_regexp_property) {
                 ZoneList<CharacterRange>* ranges =
                     new (zone()) ZoneList<CharacterRange>(2, zone());
-                if (!ParsePropertyClass(ranges)) {
+                if (!ParsePropertyClass(ranges, p == 'P')) {
                   return ReportError(CStrVector("Invalid property name"));
                 }
                 RegExpCharacterClass* cc =
-                    new (zone()) RegExpCharacterClass(ranges, p == 'P');
+                    new (zone()) RegExpCharacterClass(ranges, false);
                 builder->AddCharacterClass(cc);
               } else {
                 // With /u, no identity escapes except for syntax characters
@@ -416,7 +442,7 @@
               break;
             }
           }
-          // FALLTHROUGH
+          // Fall through.
           case '0': {
             Advance();
             if (unicode() && Next() >= '0' && Next() <= '9') {
@@ -497,6 +523,13 @@
             }
             break;
           }
+          case 'k':
+            if (FLAG_harmony_regexp_named_captures && unicode()) {
+              Advance(2);
+              ParseNamedBackReference(builder, state CHECK_FAILED);
+              break;
+            }
+          // Fall through.
           default:
             Advance();
             // With /u, no identity escapes except for syntax characters
@@ -514,14 +547,14 @@
         int dummy;
         bool parsed = ParseIntervalQuantifier(&dummy, &dummy CHECK_FAILED);
         if (parsed) return ReportError(CStrVector("Nothing to repeat"));
-        // fallthrough
+        // Fall through.
       }
       case '}':
       case ']':
         if (unicode()) {
           return ReportError(CStrVector("Lone quantifier brackets"));
         }
-      // fallthrough
+      // Fall through.
       default:
         builder->AddUnicodeCharacter(current());
         Advance();
@@ -675,6 +708,148 @@
   return true;
 }
 
+static void push_code_unit(ZoneVector<uc16>* v, uint32_t code_unit) {
+  if (code_unit <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+    v->push_back(code_unit);
+  } else {
+    v->push_back(unibrow::Utf16::LeadSurrogate(code_unit));
+    v->push_back(unibrow::Utf16::TrailSurrogate(code_unit));
+  }
+}
+
+const ZoneVector<uc16>* RegExpParser::ParseCaptureGroupName() {
+  DCHECK(FLAG_harmony_regexp_named_captures);
+  DCHECK(unicode());
+
+  ZoneVector<uc16>* name =
+      new (zone()->New(sizeof(ZoneVector<uc16>))) ZoneVector<uc16>(zone());
+
+  bool at_start = true;
+  while (true) {
+    uc32 c = current();
+    Advance();
+
+    // Convert unicode escapes.
+    if (c == '\\' && current() == 'u') {
+      Advance();
+      if (!ParseUnicodeEscape(&c)) {
+        ReportError(CStrVector("Invalid Unicode escape sequence"));
+        return nullptr;
+      }
+    }
+
+    if (at_start) {
+      if (!IdentifierStart::Is(c)) {
+        ReportError(CStrVector("Invalid capture group name"));
+        return nullptr;
+      }
+      push_code_unit(name, c);
+      at_start = false;
+    } else {
+      if (c == '>') {
+        break;
+      } else if (IdentifierPart::Is(c)) {
+        push_code_unit(name, c);
+      } else {
+        ReportError(CStrVector("Invalid capture group name"));
+        return nullptr;
+      }
+    }
+  }
+
+  return name;
+}
+
+bool RegExpParser::CreateNamedCaptureAtIndex(const ZoneVector<uc16>* name,
+                                             int index) {
+  DCHECK(FLAG_harmony_regexp_named_captures);
+  DCHECK(unicode());
+  DCHECK(0 < index && index <= captures_started_);
+  DCHECK_NOT_NULL(name);
+
+  if (named_captures_ == nullptr) {
+    named_captures_ = new (zone()) ZoneList<RegExpCapture*>(1, zone());
+  } else {
+    // Check for duplicates and bail if we find any.
+    for (const auto& named_capture : *named_captures_) {
+      if (*named_capture->name() == *name) {
+        ReportError(CStrVector("Duplicate capture group name"));
+        return false;
+      }
+    }
+  }
+
+  RegExpCapture* capture = GetCapture(index);
+  DCHECK(capture->name() == nullptr);
+
+  capture->set_name(name);
+  named_captures_->Add(capture, zone());
+
+  return true;
+}
+
+bool RegExpParser::ParseNamedBackReference(RegExpBuilder* builder,
+                                           RegExpParserState* state) {
+  // The parser is assumed to be on the '<' in \k<name>.
+  if (current() != '<') {
+    ReportError(CStrVector("Invalid named reference"));
+    return false;
+  }
+
+  Advance();
+  const ZoneVector<uc16>* name = ParseCaptureGroupName();
+  if (name == nullptr) {
+    return false;
+  }
+
+  if (state->IsInsideCaptureGroup(name)) {
+    builder->AddEmpty();
+  } else {
+    RegExpBackReference* atom = new (zone()) RegExpBackReference();
+    atom->set_name(name);
+
+    builder->AddAtom(atom);
+
+    if (named_back_references_ == nullptr) {
+      named_back_references_ =
+          new (zone()) ZoneList<RegExpBackReference*>(1, zone());
+    }
+    named_back_references_->Add(atom, zone());
+  }
+
+  return true;
+}
+
+void RegExpParser::PatchNamedBackReferences() {
+  if (named_back_references_ == nullptr) return;
+
+  if (named_captures_ == nullptr) {
+    ReportError(CStrVector("Invalid named capture referenced"));
+    return;
+  }
+
+  // Look up and patch the actual capture for each named back reference.
+  // TODO(jgruber): O(n^2), optimize if necessary.
+
+  for (int i = 0; i < named_back_references_->length(); i++) {
+    RegExpBackReference* ref = named_back_references_->at(i);
+
+    int index = -1;
+    for (const auto& capture : *named_captures_) {
+      if (*capture->name() == *ref->name()) {
+        index = capture->index();
+        break;
+      }
+    }
+
+    if (index == -1) {
+      ReportError(CStrVector("Invalid named capture referenced"));
+      return;
+    }
+
+    ref->set_capture(GetCapture(index));
+  }
+}
 
 RegExpCapture* RegExpParser::GetCapture(int index) {
   // The index for the capture groups are one-based. Its index in the list is
@@ -691,6 +866,24 @@
   return captures_->at(index - 1);
 }
 
+Handle<FixedArray> RegExpParser::CreateCaptureNameMap() {
+  if (named_captures_ == nullptr || named_captures_->is_empty())
+    return Handle<FixedArray>();
+
+  Factory* factory = isolate()->factory();
+
+  int len = named_captures_->length() * 2;
+  Handle<FixedArray> array = factory->NewFixedArray(len);
+
+  for (int i = 0; i < named_captures_->length(); i++) {
+    RegExpCapture* capture = named_captures_->at(i);
+    MaybeHandle<String> name = factory->NewStringFromTwoByte(capture->name());
+    array->set(i * 2, *name.ToHandleChecked());
+    array->set(i * 2 + 1, Smi::FromInt(capture->index()));
+  }
+
+  return array;
+}
 
 bool RegExpParser::RegExpParserState::IsInsideCaptureGroup(int index) {
   for (RegExpParserState* s = this; s != NULL; s = s->previous_state()) {
@@ -703,6 +896,15 @@
   return false;
 }
 
+bool RegExpParser::RegExpParserState::IsInsideCaptureGroup(
+    const ZoneVector<uc16>* name) {
+  DCHECK_NOT_NULL(name);
+  for (RegExpParserState* s = this; s != NULL; s = s->previous_state()) {
+    if (s->capture_name() == nullptr) continue;
+    if (*s->capture_name() == *name) return true;
+  }
+  return false;
+}
 
 // QuantifierPrefix ::
 //   { DecimalDigits }
@@ -845,6 +1047,9 @@
 }
 
 #ifdef V8_I18N_SUPPORT
+
+namespace {
+
 bool IsExactPropertyAlias(const char* property_name, UProperty property) {
   const char* short_name = u_getPropertyName(property, U_SHORT_PROPERTY_NAME);
   if (short_name != NULL && strcmp(property_name, short_name) == 0) return true;
@@ -875,7 +1080,7 @@
 }
 
 bool LookupPropertyValueName(UProperty property,
-                             const char* property_value_name,
+                             const char* property_value_name, bool negate,
                              ZoneList<CharacterRange>* result, Zone* zone) {
   int32_t property_value =
       u_getPropertyValueEnum(property, property_value_name);
@@ -895,6 +1100,7 @@
 
   if (success) {
     uset_removeAllStrings(set);
+    if (negate) uset_complement(set);
     int item_count = uset_getItemCount(set);
     int item_result = 0;
     for (int i = 0; i < item_count; i++) {
@@ -910,9 +1116,34 @@
   return success;
 }
 
-bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result) {
+template <size_t N>
+inline bool NameEquals(const char* name, const char (&literal)[N]) {
+  return strncmp(name, literal, N + 1) == 0;
+}
+
+bool LookupSpecialPropertyValueName(const char* name,
+                                    ZoneList<CharacterRange>* result,
+                                    bool negate, Zone* zone) {
+  if (NameEquals(name, "Any")) {
+    if (!negate) result->Add(CharacterRange::Everything(), zone);
+  } else if (NameEquals(name, "ASCII")) {
+    result->Add(negate ? CharacterRange::Range(0x80, String::kMaxCodePoint)
+                       : CharacterRange::Range(0x0, 0x7f),
+                zone);
+  } else if (NameEquals(name, "Assigned")) {
+    return LookupPropertyValueName(UCHAR_GENERAL_CATEGORY, "Unassigned",
+                                   !negate, result, zone);
+  } else {
+    return false;
+  }
+  return true;
+}
+
+}  // anonymous namespace
+
+bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result,
+                                      bool negate) {
   // Parse the property class as follows:
-  // - \pN with a single-character N is equivalent to \p{N}
   // - In \p{name}, 'name' is interpreted
   //   - either as a general category property value name.
   //   - or as a binary property name.
@@ -935,9 +1166,6 @@
       }
       second_part.Add(0);  // null-terminate string.
     }
-  } else if (current() != kEndMarker) {
-    // Parse \pN, where N is a single-character property name value.
-    first_part.Add(static_cast<char>(current()));
   } else {
     return false;
   }
@@ -947,8 +1175,12 @@
   if (second_part.is_empty()) {
     // First attempt to interpret as general category property value name.
     const char* name = first_part.ToConstVector().start();
-    if (LookupPropertyValueName(UCHAR_GENERAL_CATEGORY_MASK, name, result,
-                                zone())) {
+    if (LookupPropertyValueName(UCHAR_GENERAL_CATEGORY_MASK, name, negate,
+                                result, zone())) {
+      return true;
+    }
+    // Interpret "Any", "ASCII", and "Assigned".
+    if (LookupSpecialPropertyValueName(name, result, negate, zone())) {
       return true;
     }
     // Then attempt to interpret as binary property name with value name 'Y'.
@@ -956,7 +1188,8 @@
     if (property < UCHAR_BINARY_START) return false;
     if (property >= UCHAR_BINARY_LIMIT) return false;
     if (!IsExactPropertyAlias(name, property)) return false;
-    return LookupPropertyValueName(property, "Y", result, zone());
+    return LookupPropertyValueName(property, negate ? "N" : "Y", false, result,
+                                   zone());
   } else {
     // Both property name and value name are specified. Attempt to interpret
     // the property name as enumerated property.
@@ -966,13 +1199,15 @@
     if (property < UCHAR_INT_START) return false;
     if (property >= UCHAR_INT_LIMIT) return false;
     if (!IsExactPropertyAlias(property_name, property)) return false;
-    return LookupPropertyValueName(property, value_name, result, zone());
+    return LookupPropertyValueName(property, value_name, negate, result,
+                                   zone());
   }
 }
 
 #else  // V8_I18N_SUPPORT
 
-bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result) {
+bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result,
+                                      bool negate) {
   return false;
 }
 
@@ -1139,7 +1374,6 @@
   return CharacterRange::Singleton(first);
 }
 
-
 static const uc16 kNoCharClass = 0;
 
 // Adds range or pre-defined character class to character ranges.
@@ -1163,19 +1397,10 @@
   bool parse_success = false;
   if (next == 'p') {
     Advance(2);
-    parse_success = ParsePropertyClass(ranges);
+    parse_success = ParsePropertyClass(ranges, false);
   } else if (next == 'P') {
     Advance(2);
-    ZoneList<CharacterRange>* property_class =
-        new (zone()) ZoneList<CharacterRange>(2, zone());
-    parse_success = ParsePropertyClass(property_class);
-    if (parse_success) {
-      ZoneList<CharacterRange>* negated =
-          new (zone()) ZoneList<CharacterRange>(2, zone());
-      CharacterRange::Negate(property_class, negated, zone());
-      const Vector<CharacterRange> negated_vector = negated->ToVector();
-      ranges->AddAll(negated_vector, zone());
-    }
+    parse_success = ParsePropertyClass(ranges, true);
   } else {
     return false;
   }
@@ -1272,6 +1497,7 @@
     int capture_count = parser.captures_started();
     result->simple = tree->IsAtom() && parser.simple() && capture_count == 0;
     result->contains_anchor = parser.contains_anchor();
+    result->capture_name_map = parser.CreateCaptureNameMap();
     result->capture_count = capture_count;
   }
   return !parser.failed();
diff --git a/src/regexp/regexp-parser.h b/src/regexp/regexp-parser.h
index 6142a9e..a0b975d 100644
--- a/src/regexp/regexp-parser.h
+++ b/src/regexp/regexp-parser.h
@@ -174,7 +174,7 @@
   bool ParseHexEscape(int length, uc32* value);
   bool ParseUnicodeEscape(uc32* value);
   bool ParseUnlimitedLengthHexNumber(int max_value, uc32* value);
-  bool ParsePropertyClass(ZoneList<CharacterRange>* result);
+  bool ParsePropertyClass(ZoneList<CharacterRange>* result, bool negate);
 
   uc32 ParseOctalLiteral();
 
@@ -222,13 +222,15 @@
     RegExpParserState(RegExpParserState* previous_state,
                       SubexpressionType group_type,
                       RegExpLookaround::Type lookaround_type,
-                      int disjunction_capture_index, bool ignore_case,
+                      int disjunction_capture_index,
+                      const ZoneVector<uc16>* capture_name, bool ignore_case,
                       bool unicode, Zone* zone)
         : previous_state_(previous_state),
           builder_(new (zone) RegExpBuilder(zone, ignore_case, unicode)),
           group_type_(group_type),
           lookaround_type_(lookaround_type),
-          disjunction_capture_index_(disjunction_capture_index) {}
+          disjunction_capture_index_(disjunction_capture_index),
+          capture_name_(capture_name) {}
     // Parser state of containing expression, if any.
     RegExpParserState* previous_state() { return previous_state_; }
     bool IsSubexpression() { return previous_state_ != NULL; }
@@ -242,9 +244,16 @@
     // Also the capture index of this sub-expression itself, if group_type
     // is CAPTURE.
     int capture_index() { return disjunction_capture_index_; }
+    // The name of the current sub-expression, if group_type is CAPTURE. Only
+    // used for named captures.
+    const ZoneVector<uc16>* capture_name() { return capture_name_; }
+
+    bool IsNamedCapture() const { return capture_name_ != nullptr; }
 
     // Check whether the parser is inside a capture group with the given index.
     bool IsInsideCaptureGroup(int index);
+    // Check whether the parser is inside a capture group with the given name.
+    bool IsInsideCaptureGroup(const ZoneVector<uc16>* name);
 
    private:
     // Linked list implementation of stack of states.
@@ -257,11 +266,32 @@
     RegExpLookaround::Type lookaround_type_;
     // Stored disjunction's capture index (if any).
     int disjunction_capture_index_;
+    // Stored capture name (if any).
+    const ZoneVector<uc16>* capture_name_;
   };
 
   // Return the 1-indexed RegExpCapture object, allocate if necessary.
   RegExpCapture* GetCapture(int index);
 
+  // Creates a new named capture at the specified index. Must be called exactly
+  // once for each named capture. Fails if a capture with the same name is
+  // encountered.
+  bool CreateNamedCaptureAtIndex(const ZoneVector<uc16>* name, int index);
+
+  // Parses the name of a capture group (?<name>pattern). The name must adhere
+  // to IdentifierName in the ECMAScript standard.
+  const ZoneVector<uc16>* ParseCaptureGroupName();
+
+  bool ParseNamedBackReference(RegExpBuilder* builder,
+                               RegExpParserState* state);
+
+  // After the initial parsing pass, patch corresponding RegExpCapture objects
+  // into all RegExpBackReferences. This is done after initial parsing in order
+  // to avoid complicating cases in which references comes before the capture.
+  void PatchNamedBackReferences();
+
+  Handle<FixedArray> CreateCaptureNameMap();
+
   Isolate* isolate() { return isolate_; }
   Zone* zone() const { return zone_; }
 
@@ -278,6 +308,8 @@
   Zone* zone_;
   Handle<String>* error_;
   ZoneList<RegExpCapture*>* captures_;
+  ZoneList<RegExpCapture*>* named_captures_;
+  ZoneList<RegExpBackReference*>* named_back_references_;
   FlatStringReader* in_;
   uc32 current_;
   bool ignore_case_;
diff --git a/src/regexp/s390/OWNERS b/src/regexp/s390/OWNERS
index eb007cb..752e8e3 100644
--- a/src/regexp/s390/OWNERS
+++ b/src/regexp/s390/OWNERS
@@ -3,3 +3,4 @@
 joransiu@ca.ibm.com
 mbrandy@us.ibm.com
 michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/src/regexp/s390/regexp-macro-assembler-s390.cc b/src/regexp/s390/regexp-macro-assembler-s390.cc
index 9dac534..d9ca1df 100644
--- a/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -10,7 +10,6 @@
 #include "src/code-stubs.h"
 #include "src/log.h"
 #include "src/macro-assembler.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/regexp/regexp-macro-assembler.h"
 #include "src/regexp/regexp-stack.h"
 #include "src/regexp/s390/regexp-macro-assembler-s390.h"
diff --git a/src/regexp/x64/regexp-macro-assembler-x64.cc b/src/regexp/x64/regexp-macro-assembler-x64.cc
index 5d73b43..aafc840 100644
--- a/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -8,7 +8,6 @@
 
 #include "src/log.h"
 #include "src/macro-assembler.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/regexp/regexp-macro-assembler.h"
 #include "src/regexp/regexp-stack.h"
 #include "src/unicode.h"
diff --git a/src/regexp/x87/regexp-macro-assembler-x87.cc b/src/regexp/x87/regexp-macro-assembler-x87.cc
index 9f15b1c..4a1c3a8 100644
--- a/src/regexp/x87/regexp-macro-assembler-x87.cc
+++ b/src/regexp/x87/regexp-macro-assembler-x87.cc
@@ -8,7 +8,6 @@
 
 #include "src/log.h"
 #include "src/macro-assembler.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/regexp/regexp-macro-assembler.h"
 #include "src/regexp/regexp-stack.h"
 #include "src/unicode.h"
diff --git a/src/register-configuration.cc b/src/register-configuration.cc
index ab5c692..148c3fc 100644
--- a/src/register-configuration.cc
+++ b/src/register-configuration.cc
@@ -33,6 +33,12 @@
 #undef REGISTER_NAME
 };
 
+static const char* const kFloatRegisterNames[] = {
+#define REGISTER_NAME(R) #R,
+    FLOAT_REGISTERS(REGISTER_NAME)
+#undef REGISTER_NAME
+};
+
 static const char* const kDoubleRegisterNames[] = {
 #define REGISTER_NAME(R) #R,
     DOUBLE_REGISTERS(REGISTER_NAME)
@@ -44,113 +50,99 @@
 STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
               DoubleRegister::kMaxNumRegisters);
 
+enum CompilerSelector { CRANKSHAFT, TURBOFAN };
+
 class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
  public:
   explicit ArchDefaultRegisterConfiguration(CompilerSelector compiler)
-      : RegisterConfiguration(Register::kNumRegisters,
-                              DoubleRegister::kMaxNumRegisters,
+      : RegisterConfiguration(
+            Register::kNumRegisters, DoubleRegister::kMaxNumRegisters,
 #if V8_TARGET_ARCH_IA32
-                              kMaxAllocatableGeneralRegisterCount,
-                              kMaxAllocatableDoubleRegisterCount,
-                              kMaxAllocatableDoubleRegisterCount,
+            kMaxAllocatableGeneralRegisterCount,
+            kMaxAllocatableDoubleRegisterCount,
 #elif V8_TARGET_ARCH_X87
-                              kMaxAllocatableGeneralRegisterCount,
-                              compiler == TURBOFAN
-                                  ? 1
-                                  : kMaxAllocatableDoubleRegisterCount,
-                              compiler == TURBOFAN
-                                  ? 1
-                                  : kMaxAllocatableDoubleRegisterCount,
+            kMaxAllocatableGeneralRegisterCount,
+            compiler == TURBOFAN ? 1 : kMaxAllocatableDoubleRegisterCount,
 #elif V8_TARGET_ARCH_X64
-                              kMaxAllocatableGeneralRegisterCount,
-                              kMaxAllocatableDoubleRegisterCount,
-                              kMaxAllocatableDoubleRegisterCount,
+            kMaxAllocatableGeneralRegisterCount,
+            kMaxAllocatableDoubleRegisterCount,
 #elif V8_TARGET_ARCH_ARM
-                              FLAG_enable_embedded_constant_pool
-                                  ? (kMaxAllocatableGeneralRegisterCount - 1)
-                                  : kMaxAllocatableGeneralRegisterCount,
-                              CpuFeatures::IsSupported(VFP32DREGS)
-                                  ? kMaxAllocatableDoubleRegisterCount
-                                  : (ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(
-                                        REGISTER_COUNT)0),
-                              ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(
-                                  REGISTER_COUNT)0,
+            FLAG_enable_embedded_constant_pool
+                ? (kMaxAllocatableGeneralRegisterCount - 1)
+                : kMaxAllocatableGeneralRegisterCount,
+            CpuFeatures::IsSupported(VFP32DREGS)
+                ? kMaxAllocatableDoubleRegisterCount
+                : (ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_COUNT) 0),
 #elif V8_TARGET_ARCH_ARM64
-                              kMaxAllocatableGeneralRegisterCount,
-                              kMaxAllocatableDoubleRegisterCount,
-                              kMaxAllocatableDoubleRegisterCount,
+            kMaxAllocatableGeneralRegisterCount,
+            kMaxAllocatableDoubleRegisterCount,
 #elif V8_TARGET_ARCH_MIPS
-                              kMaxAllocatableGeneralRegisterCount,
-                              kMaxAllocatableDoubleRegisterCount,
-                              kMaxAllocatableDoubleRegisterCount,
+            kMaxAllocatableGeneralRegisterCount,
+            kMaxAllocatableDoubleRegisterCount,
 #elif V8_TARGET_ARCH_MIPS64
-                              kMaxAllocatableGeneralRegisterCount,
-                              kMaxAllocatableDoubleRegisterCount,
-                              kMaxAllocatableDoubleRegisterCount,
+            kMaxAllocatableGeneralRegisterCount,
+            kMaxAllocatableDoubleRegisterCount,
 #elif V8_TARGET_ARCH_PPC
-                              kMaxAllocatableGeneralRegisterCount,
-                              kMaxAllocatableDoubleRegisterCount,
-                              kMaxAllocatableDoubleRegisterCount,
+            kMaxAllocatableGeneralRegisterCount,
+            kMaxAllocatableDoubleRegisterCount,
 #elif V8_TARGET_ARCH_S390
-                              kMaxAllocatableGeneralRegisterCount,
-                              kMaxAllocatableDoubleRegisterCount,
-                              kMaxAllocatableDoubleRegisterCount,
+            kMaxAllocatableGeneralRegisterCount,
+            kMaxAllocatableDoubleRegisterCount,
 #else
 #error Unsupported target architecture.
 #endif
-                              kAllocatableGeneralCodes, kAllocatableDoubleCodes,
-                              kGeneralRegisterNames, kDoubleRegisterNames) {
+            kAllocatableGeneralCodes, kAllocatableDoubleCodes,
+            kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE,
+            kGeneralRegisterNames, kFloatRegisterNames, kDoubleRegisterNames) {
   }
 };
 
-
-template <RegisterConfiguration::CompilerSelector compiler>
+template <CompilerSelector compiler>
 struct RegisterConfigurationInitializer {
   static void Construct(ArchDefaultRegisterConfiguration* config) {
     new (config) ArchDefaultRegisterConfiguration(compiler);
   }
 };
 
-static base::LazyInstance<
-    ArchDefaultRegisterConfiguration,
-    RegisterConfigurationInitializer<RegisterConfiguration::CRANKSHAFT>>::type
+static base::LazyInstance<ArchDefaultRegisterConfiguration,
+                          RegisterConfigurationInitializer<CRANKSHAFT>>::type
     kDefaultRegisterConfigurationForCrankshaft = LAZY_INSTANCE_INITIALIZER;
 
-
-static base::LazyInstance<
-    ArchDefaultRegisterConfiguration,
-    RegisterConfigurationInitializer<RegisterConfiguration::TURBOFAN>>::type
+static base::LazyInstance<ArchDefaultRegisterConfiguration,
+                          RegisterConfigurationInitializer<TURBOFAN>>::type
     kDefaultRegisterConfigurationForTurboFan = LAZY_INSTANCE_INITIALIZER;
 
 }  // namespace
 
-
-const RegisterConfiguration* RegisterConfiguration::ArchDefault(
-    CompilerSelector compiler) {
-  return compiler == TURBOFAN
-             ? &kDefaultRegisterConfigurationForTurboFan.Get()
-             : &kDefaultRegisterConfigurationForCrankshaft.Get();
+const RegisterConfiguration* RegisterConfiguration::Crankshaft() {
+  return &kDefaultRegisterConfigurationForCrankshaft.Get();
 }
 
+const RegisterConfiguration* RegisterConfiguration::Turbofan() {
+  return &kDefaultRegisterConfigurationForTurboFan.Get();
+}
 
 RegisterConfiguration::RegisterConfiguration(
     int num_general_registers, int num_double_registers,
     int num_allocatable_general_registers, int num_allocatable_double_registers,
-    int num_allocatable_aliased_double_registers,
     const int* allocatable_general_codes, const int* allocatable_double_codes,
-    const char* const* general_register_names,
+    AliasingKind fp_aliasing_kind, const char* const* general_register_names,
+    const char* const* float_register_names,
     const char* const* double_register_names)
     : num_general_registers_(num_general_registers),
+      num_float_registers_(0),
       num_double_registers_(num_double_registers),
       num_allocatable_general_registers_(num_allocatable_general_registers),
       num_allocatable_double_registers_(num_allocatable_double_registers),
-      num_allocatable_aliased_double_registers_(
-          num_allocatable_aliased_double_registers),
+      num_allocatable_float_registers_(0),
       allocatable_general_codes_mask_(0),
       allocatable_double_codes_mask_(0),
+      allocatable_float_codes_mask_(0),
       allocatable_general_codes_(allocatable_general_codes),
       allocatable_double_codes_(allocatable_double_codes),
+      fp_aliasing_kind_(fp_aliasing_kind),
       general_register_names_(general_register_names),
+      float_register_names_(float_register_names),
       double_register_names_(double_register_names) {
   DCHECK(num_general_registers_ <= RegisterConfiguration::kMaxGeneralRegisters);
   DCHECK(num_double_registers_ <= RegisterConfiguration::kMaxFPRegisters);
@@ -160,6 +152,81 @@
   for (int i = 0; i < num_allocatable_double_registers_; ++i) {
     allocatable_double_codes_mask_ |= (1 << allocatable_double_codes_[i]);
   }
+
+  if (fp_aliasing_kind_ == COMBINE) {
+    num_float_registers_ = num_double_registers_ * 2 <= kMaxFPRegisters
+                               ? num_double_registers_ * 2
+                               : kMaxFPRegisters;
+    num_allocatable_float_registers_ = 0;
+    for (int i = 0; i < num_allocatable_double_registers_; i++) {
+      int base_code = allocatable_double_codes_[i] * 2;
+      if (base_code >= kMaxFPRegisters) continue;
+      allocatable_float_codes_[num_allocatable_float_registers_++] = base_code;
+      allocatable_float_codes_[num_allocatable_float_registers_++] =
+          base_code + 1;
+      allocatable_float_codes_mask_ |= (0x3 << base_code);
+    }
+  } else {
+    DCHECK(fp_aliasing_kind_ == OVERLAP);
+    num_float_registers_ = num_double_registers_;
+    num_allocatable_float_registers_ = num_allocatable_double_registers_;
+    for (int i = 0; i < num_allocatable_float_registers_; ++i) {
+      allocatable_float_codes_[i] = allocatable_double_codes_[i];
+    }
+    allocatable_float_codes_mask_ = allocatable_double_codes_mask_;
+  }
+}
+
+int RegisterConfiguration::GetAliases(MachineRepresentation rep, int index,
+                                      MachineRepresentation other_rep,
+                                      int* alias_base_index) const {
+  DCHECK(fp_aliasing_kind_ == COMBINE);
+  DCHECK(rep == MachineRepresentation::kFloat32 ||
+         rep == MachineRepresentation::kFloat64);
+  DCHECK(other_rep == MachineRepresentation::kFloat32 ||
+         other_rep == MachineRepresentation::kFloat64);
+  if (rep == other_rep) {
+    *alias_base_index = index;
+    return 1;
+  }
+  if (rep == MachineRepresentation::kFloat32) {
+    DCHECK(other_rep == MachineRepresentation::kFloat64);
+    DCHECK(index < num_allocatable_float_registers_);
+    *alias_base_index = index / 2;
+    return 1;
+  }
+  DCHECK(rep == MachineRepresentation::kFloat64);
+  DCHECK(other_rep == MachineRepresentation::kFloat32);
+  if (index * 2 >= kMaxFPRegisters) {
+    // Alias indices are out of float register range.
+    return 0;
+  }
+  *alias_base_index = index * 2;
+  return 2;
+}
+
+bool RegisterConfiguration::AreAliases(MachineRepresentation rep, int index,
+                                       MachineRepresentation other_rep,
+                                       int other_index) const {
+  DCHECK(fp_aliasing_kind_ == COMBINE);
+  DCHECK(rep == MachineRepresentation::kFloat32 ||
+         rep == MachineRepresentation::kFloat64);
+  DCHECK(other_rep == MachineRepresentation::kFloat32 ||
+         other_rep == MachineRepresentation::kFloat64);
+  if (rep == other_rep) {
+    return index == other_index;
+  }
+  if (rep == MachineRepresentation::kFloat32) {
+    DCHECK(other_rep == MachineRepresentation::kFloat64);
+    return index / 2 == other_index;
+  }
+  DCHECK(rep == MachineRepresentation::kFloat64);
+  DCHECK(other_rep == MachineRepresentation::kFloat32);
+  if (index * 2 >= kMaxFPRegisters) {
+    // Alias indices are out of float register range.
+    return false;
+  }
+  return index == other_index / 2;
 }
 
 #undef REGISTER_COUNT
diff --git a/src/register-configuration.h b/src/register-configuration.h
index c07106e..25f3ef5 100644
--- a/src/register-configuration.h
+++ b/src/register-configuration.h
@@ -6,6 +6,7 @@
 #define V8_COMPILER_REGISTER_CONFIGURATION_H_
 
 #include "src/base/macros.h"
+#include "src/machine-type.h"
 
 namespace v8 {
 namespace internal {
@@ -14,29 +15,35 @@
 // for instruction creation.
 class RegisterConfiguration {
  public:
-  // Define the optimized compiler selector for register configuration
-  // selection.
-  //
-  // TODO(X87): This distinction in RegisterConfigurations is temporary
-  // until x87 TF supports all of the registers that Crankshaft does.
-  enum CompilerSelector { CRANKSHAFT, TURBOFAN };
+  enum AliasingKind {
+    // Registers alias a single register of every other size (e.g. Intel).
+    OVERLAP,
+    // Registers alias two registers of the next smaller size (e.g. ARM).
+    COMBINE
+  };
 
   // Architecture independent maxes.
   static const int kMaxGeneralRegisters = 32;
   static const int kMaxFPRegisters = 32;
 
-  static const RegisterConfiguration* ArchDefault(CompilerSelector compiler);
+  // Default RegisterConfigurations for the target architecture.
+  // TODO(X87): This distinction in RegisterConfigurations is temporary
+  // until x87 TF supports all of the registers that Crankshaft does.
+  static const RegisterConfiguration* Crankshaft();
+  static const RegisterConfiguration* Turbofan();
 
   RegisterConfiguration(int num_general_registers, int num_double_registers,
                         int num_allocatable_general_registers,
                         int num_allocatable_double_registers,
-                        int num_allocatable_aliased_double_registers,
                         const int* allocatable_general_codes,
                         const int* allocatable_double_codes,
+                        AliasingKind fp_aliasing_kind,
                         char const* const* general_names,
+                        char const* const* float_names,
                         char const* const* double_names);
 
   int num_general_registers() const { return num_general_registers_; }
+  int num_float_registers() const { return num_float_registers_; }
   int num_double_registers() const { return num_double_registers_; }
   int num_allocatable_general_registers() const {
     return num_allocatable_general_registers_;
@@ -44,12 +51,10 @@
   int num_allocatable_double_registers() const {
     return num_allocatable_double_registers_;
   }
-  // TODO(turbofan): This is a temporary work-around required because our
-  // register allocator does not yet support the aliasing of single/double
-  // registers on ARM.
-  int num_allocatable_aliased_double_registers() const {
-    return num_allocatable_aliased_double_registers_;
+  int num_allocatable_float_registers() const {
+    return num_allocatable_float_registers_;
   }
+  AliasingKind fp_aliasing_kind() const { return fp_aliasing_kind_; }
   int32_t allocatable_general_codes_mask() const {
     return allocatable_general_codes_mask_;
   }
@@ -59,12 +64,27 @@
   int GetAllocatableGeneralCode(int index) const {
     return allocatable_general_codes_[index];
   }
+  bool IsAllocatableGeneralCode(int index) const {
+    return ((1 << index) & allocatable_general_codes_mask_) != 0;
+  }
   int GetAllocatableDoubleCode(int index) const {
     return allocatable_double_codes_[index];
   }
+  bool IsAllocatableDoubleCode(int index) const {
+    return ((1 << index) & allocatable_double_codes_mask_) != 0;
+  }
+  int GetAllocatableFloatCode(int index) const {
+    return allocatable_float_codes_[index];
+  }
+  bool IsAllocatableFloatCode(int index) const {
+    return ((1 << index) & allocatable_float_codes_mask_) != 0;
+  }
   const char* GetGeneralRegisterName(int code) const {
     return general_register_names_[code];
   }
+  const char* GetFloatRegisterName(int code) const {
+    return float_register_names_[code];
+  }
   const char* GetDoubleRegisterName(int code) const {
     return double_register_names_[code];
   }
@@ -74,18 +94,38 @@
   const int* allocatable_double_codes() const {
     return allocatable_double_codes_;
   }
+  const int* allocatable_float_codes() const {
+    return allocatable_float_codes_;
+  }
+
+  // Aliasing calculations for floating point registers, when fp_aliasing_kind()
+  // is COMBINE. Currently only implemented for kFloat32, or kFloat64 reps.
+  // Returns the number of aliases, and if > 0, alias_base_index is set to the
+  // index of the first alias.
+  int GetAliases(MachineRepresentation rep, int index,
+                 MachineRepresentation other_rep, int* alias_base_index) const;
+  // Returns a value indicating whether two registers alias each other, when
+  // fp_aliasing_kind() is COMBINE. Currently only implemented for kFloat32, or
+  // kFloat64 reps.
+  bool AreAliases(MachineRepresentation rep, int index,
+                  MachineRepresentation other_rep, int other_index) const;
 
  private:
   const int num_general_registers_;
+  int num_float_registers_;
   const int num_double_registers_;
   int num_allocatable_general_registers_;
   int num_allocatable_double_registers_;
-  int num_allocatable_aliased_double_registers_;
+  int num_allocatable_float_registers_;
   int32_t allocatable_general_codes_mask_;
   int32_t allocatable_double_codes_mask_;
+  int32_t allocatable_float_codes_mask_;
   const int* allocatable_general_codes_;
   const int* allocatable_double_codes_;
+  int allocatable_float_codes_[kMaxFPRegisters];
+  AliasingKind fp_aliasing_kind_;
   char const* const* general_register_names_;
+  char const* const* float_register_names_;
   char const* const* double_register_names_;
 };
 
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 1b571a7..6500b9a 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -21,7 +21,7 @@
 
 // Number of times a function has to be seen on the stack before it is
 // compiled for baseline.
-static const int kProfilerTicksBeforeBaseline = 2;
+static const int kProfilerTicksBeforeBaseline = 1;
 // Number of times a function has to be seen on the stack before it is
 // optimized.
 static const int kProfilerTicksBeforeOptimization = 2;
@@ -56,16 +56,14 @@
       any_ic_changed_(false) {
 }
 
-
-static void GetICCounts(SharedFunctionInfo* shared,
-                        int* ic_with_type_info_count, int* ic_generic_count,
-                        int* ic_total_count, int* type_info_percentage,
-                        int* generic_percentage) {
+static void GetICCounts(JSFunction* function, int* ic_with_type_info_count,
+                        int* ic_generic_count, int* ic_total_count,
+                        int* type_info_percentage, int* generic_percentage) {
   *ic_total_count = 0;
   *ic_generic_count = 0;
   *ic_with_type_info_count = 0;
-  if (shared->code()->kind() == Code::FUNCTION) {
-    Code* shared_code = shared->code();
+  if (function->code()->kind() == Code::FUNCTION) {
+    Code* shared_code = function->shared()->code();
     Object* raw_info = shared_code->type_feedback_info();
     if (raw_info->IsTypeFeedbackInfo()) {
       TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
@@ -76,7 +74,7 @@
   }
 
   // Harvest vector-ics as well
-  TypeFeedbackVector* vector = shared->feedback_vector();
+  TypeFeedbackVector* vector = function->feedback_vector();
   int with = 0, gen = 0;
   vector->ComputeCounts(&with, &gen);
   *ic_with_type_info_count += with;
@@ -100,8 +98,8 @@
     PrintF(" for %s recompilation, reason: %s", type, reason);
     if (FLAG_type_info_threshold > 0) {
       int typeinfo, generic, total, type_percentage, generic_percentage;
-      GetICCounts(function->shared(), &typeinfo, &generic, &total,
-                  &type_percentage, &generic_percentage);
+      GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
+                  &generic_percentage);
       PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total,
              type_percentage);
       PrintF(", generic ICs: %d/%d (%d%%)", generic, total, generic_percentage);
@@ -219,7 +217,7 @@
 
   if (ticks >= kProfilerTicksBeforeOptimization) {
     int typeinfo, generic, total, type_percentage, generic_percentage;
-    GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
+    GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
                 &generic_percentage);
     if (type_percentage >= FLAG_type_info_threshold &&
         generic_percentage <= FLAG_generic_ic_threshold) {
@@ -242,7 +240,7 @@
     // If no IC was patched since the last tick and this function is very
     // small, optimistically optimize it now.
     int typeinfo, generic, total, type_percentage, generic_percentage;
-    GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
+    GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
                 &generic_percentage);
     if (type_percentage >= FLAG_type_info_threshold &&
         generic_percentage <= FLAG_generic_ic_threshold) {
diff --git a/src/runtime/runtime-array.cc b/src/runtime/runtime-array.cc
index 519df77..a92215c 100644
--- a/src/runtime/runtime-array.cc
+++ b/src/runtime/runtime-array.cc
@@ -22,8 +22,9 @@
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, prototype, 0);
   Object* length = prototype->length();
-  RUNTIME_ASSERT(length->IsSmi() && Smi::cast(length)->value() == 0);
-  RUNTIME_ASSERT(prototype->HasFastSmiOrObjectElements());
+  CHECK(length->IsSmi());
+  CHECK(Smi::cast(length)->value() == 0);
+  CHECK(prototype->HasFastSmiOrObjectElements());
   // This is necessary to enable fast checks for absence of elements
   // on Array.prototype and below.
   prototype->set_elements(isolate->heap()->empty_fixed_array());
@@ -85,7 +86,7 @@
 
 RUNTIME_FUNCTION(Runtime_TransitionElementsKind) {
   HandleScope scope(isolate);
-  RUNTIME_ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
   CONVERT_ARG_HANDLE_CHECKED(Map, map, 1);
   JSObject::TransitionElementsKind(array, map->elements_kind());
@@ -182,8 +183,14 @@
   DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]);
+  ElementsKind kind = array->GetElementsKind();
 
-  if (array->HasFastStringWrapperElements()) {
+  if (IsFastElementsKind(kind) || IsFixedTypedArrayElementsKind(kind)) {
+    uint32_t actual_length = static_cast<uint32_t>(array->elements()->length());
+    return *isolate->factory()->NewNumberFromUint(Min(actual_length, length));
+  }
+
+  if (kind == FAST_STRING_WRAPPER_ELEMENTS) {
     int string_length =
         String::cast(Handle<JSValue>::cast(array)->value())->length();
     int backing_store_length = array->elements()->length();
@@ -192,17 +199,9 @@
             static_cast<uint32_t>(Max(string_length, backing_store_length))));
   }
 
-  if (!array->elements()->IsDictionary()) {
-    RUNTIME_ASSERT(array->HasFastSmiOrObjectElements() ||
-                   array->HasFastDoubleElements());
-    uint32_t actual_length = static_cast<uint32_t>(array->elements()->length());
-    return *isolate->factory()->NewNumberFromUint(Min(actual_length, length));
-  }
-
-  KeyAccumulator accumulator(isolate, OWN_ONLY, ALL_PROPERTIES);
-  // No need to separate prototype levels since we only get element keys.
-  for (PrototypeIterator iter(isolate, array,
-                              PrototypeIterator::START_AT_RECEIVER);
+  KeyAccumulator accumulator(isolate, KeyCollectionMode::kOwnOnly,
+                             ALL_PROPERTIES);
+  for (PrototypeIterator iter(isolate, array, kStartAtReceiver);
        !iter.IsAtEnd(); iter.Advance()) {
     if (PrototypeIterator::GetCurrent(iter)->IsJSProxy() ||
         PrototypeIterator::GetCurrent<JSObject>(iter)
@@ -211,12 +210,12 @@
       // collecting keys in that case.
       return *isolate->factory()->NewNumberFromUint(length);
     }
-    accumulator.NextPrototype();
     Handle<JSObject> current = PrototypeIterator::GetCurrent<JSObject>(iter);
-    accumulator.CollectOwnElementIndices(current);
+    accumulator.CollectOwnElementIndices(array, current);
   }
   // Erase any keys >= length.
-  Handle<FixedArray> keys = accumulator.GetKeys(KEEP_NUMBERS);
+  Handle<FixedArray> keys =
+      accumulator.GetKeys(GetKeysConversion::kKeepNumbers);
   int j = 0;
   for (int i = 0; i < keys->length(); i++) {
     if (NumberToUint32(keys->get(i)) >= length) continue;
@@ -321,7 +320,6 @@
 
 }  // namespace
 
-
 RUNTIME_FUNCTION(Runtime_NewArray) {
   HandleScope scope(isolate);
   DCHECK_LE(3, args.length());
@@ -338,66 +336,12 @@
   return ArrayConstructorCommon(isolate, constructor, new_target, site, &argv);
 }
 
-
-RUNTIME_FUNCTION(Runtime_ArrayConstructor) {
-  HandleScope scope(isolate);
-  // If we get 2 arguments then they are the stub parameters (constructor, type
-  // info).  If we get 4, then the first one is a pointer to the arguments
-  // passed by the caller, and the last one is the length of the arguments
-  // passed to the caller (redundant, but useful to check on the deoptimizer
-  // with an assert).
-  Arguments empty_args(0, NULL);
-  bool no_caller_args = args.length() == 2;
-  DCHECK(no_caller_args || args.length() == 4);
-  int parameters_start = no_caller_args ? 0 : 1;
-  Arguments* caller_args =
-      no_caller_args ? &empty_args : reinterpret_cast<Arguments*>(args[0]);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, parameters_start);
-  CONVERT_ARG_HANDLE_CHECKED(Object, type_info, parameters_start + 1);
-#ifdef DEBUG
-  if (!no_caller_args) {
-    CONVERT_SMI_ARG_CHECKED(arg_count, parameters_start + 2);
-    DCHECK(arg_count == caller_args->length());
-  }
-#endif
-
-  Handle<AllocationSite> site;
-  if (!type_info.is_null() &&
-      *type_info != isolate->heap()->undefined_value()) {
-    site = Handle<AllocationSite>::cast(type_info);
-    DCHECK(!site->SitePointsToLiteral());
-  }
-
-  return ArrayConstructorCommon(isolate, constructor, constructor, site,
-                                caller_args);
-}
-
-RUNTIME_FUNCTION(Runtime_InternalArrayConstructor) {
-  HandleScope scope(isolate);
-  Arguments empty_args(0, NULL);
-  bool no_caller_args = args.length() == 1;
-  DCHECK(no_caller_args || args.length() == 3);
-  int parameters_start = no_caller_args ? 0 : 1;
-  Arguments* caller_args =
-      no_caller_args ? &empty_args : reinterpret_cast<Arguments*>(args[0]);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, parameters_start);
-#ifdef DEBUG
-  if (!no_caller_args) {
-    CONVERT_SMI_ARG_CHECKED(arg_count, parameters_start + 1);
-    DCHECK(arg_count == caller_args->length());
-  }
-#endif
-  return ArrayConstructorCommon(isolate, constructor, constructor,
-                                Handle<AllocationSite>::null(), caller_args);
-}
-
-
 RUNTIME_FUNCTION(Runtime_NormalizeElements) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
-  RUNTIME_ASSERT(!array->HasFixedTypedArrayElements() &&
-                 !array->IsJSGlobalProxy());
+  CHECK(!array->HasFixedTypedArrayElements());
+  CHECK(!array->IsJSGlobalProxy());
   JSObject::NormalizeElements(array);
   return *array;
 }
@@ -437,8 +381,7 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
-  for (PrototypeIterator iter(isolate, array,
-                              PrototypeIterator::START_AT_RECEIVER);
+  for (PrototypeIterator iter(isolate, array, kStartAtReceiver);
        !iter.IsAtEnd(); iter.Advance()) {
     if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
       return isolate->heap()->true_value();
@@ -491,11 +434,8 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, original_array, 0);
-  Handle<Object> constructor;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, constructor,
-      Object::ArraySpeciesConstructor(isolate, original_array));
-  return *constructor;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, Object::ArraySpeciesConstructor(isolate, original_array));
 }
 
 }  // namespace internal
diff --git a/src/runtime/runtime-atomics.cc b/src/runtime/runtime-atomics.cc
index dd309f7..28a8741 100644
--- a/src/runtime/runtime-atomics.cc
+++ b/src/runtime/runtime-atomics.cc
@@ -354,8 +354,8 @@
   CONVERT_SIZE_ARG_CHECKED(index, 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(oldobj, 2);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(newobj, 3);
-  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
-  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+  CHECK(sta->GetBuffer()->is_shared());
+  CHECK_LT(index, NumberToSize(isolate, sta->length()));
 
   uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                     NumberToSize(isolate, sta->byte_offset());
@@ -387,8 +387,8 @@
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
   CONVERT_SIZE_ARG_CHECKED(index, 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
-  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
-  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+  CHECK(sta->GetBuffer()->is_shared());
+  CHECK_LT(index, NumberToSize(isolate, sta->length()));
 
   uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                     NumberToSize(isolate, sta->byte_offset());
@@ -419,8 +419,8 @@
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
   CONVERT_SIZE_ARG_CHECKED(index, 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
-  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
-  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+  CHECK(sta->GetBuffer()->is_shared());
+  CHECK_LT(index, NumberToSize(isolate, sta->length()));
 
   uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                     NumberToSize(isolate, sta->byte_offset());
@@ -451,8 +451,8 @@
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
   CONVERT_SIZE_ARG_CHECKED(index, 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
-  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
-  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+  CHECK(sta->GetBuffer()->is_shared());
+  CHECK_LT(index, NumberToSize(isolate, sta->length()));
 
   uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                     NumberToSize(isolate, sta->byte_offset());
@@ -483,8 +483,8 @@
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
   CONVERT_SIZE_ARG_CHECKED(index, 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
-  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
-  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+  CHECK(sta->GetBuffer()->is_shared());
+  CHECK_LT(index, NumberToSize(isolate, sta->length()));
 
   uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                     NumberToSize(isolate, sta->byte_offset());
@@ -515,8 +515,8 @@
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
   CONVERT_SIZE_ARG_CHECKED(index, 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
-  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
-  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+  CHECK(sta->GetBuffer()->is_shared());
+  CHECK_LT(index, NumberToSize(isolate, sta->length()));
 
   uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                     NumberToSize(isolate, sta->byte_offset());
@@ -547,8 +547,8 @@
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
   CONVERT_SIZE_ARG_CHECKED(index, 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
-  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
-  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+  CHECK(sta->GetBuffer()->is_shared());
+  CHECK_LT(index, NumberToSize(isolate, sta->length()));
 
   uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                     NumberToSize(isolate, sta->byte_offset());
diff --git a/src/runtime/runtime-classes.cc b/src/runtime/runtime-classes.cc
index a784d6d..303122f 100644
--- a/src/runtime/runtime-classes.cc
+++ b/src/runtime/runtime-classes.cc
@@ -88,25 +88,21 @@
   Handle<Object> prototype_parent;
   Handle<Object> constructor_parent;
 
-  if (super_class->IsTheHole()) {
+  if (super_class->IsTheHole(isolate)) {
     prototype_parent = isolate->initial_object_prototype();
   } else {
-    if (super_class->IsNull()) {
+    if (super_class->IsNull(isolate)) {
       prototype_parent = isolate->factory()->null_value();
     } else if (super_class->IsConstructor()) {
-      if (super_class->IsJSFunction() &&
-          Handle<JSFunction>::cast(super_class)->shared()->is_generator()) {
-        THROW_NEW_ERROR(
-            isolate,
-            NewTypeError(MessageTemplate::kExtendsValueGenerator, super_class),
-            Object);
-      }
+      DCHECK(!super_class->IsJSFunction() ||
+             !Handle<JSFunction>::cast(super_class)->shared()->is_resumable());
       ASSIGN_RETURN_ON_EXCEPTION(
           isolate, prototype_parent,
           Runtime::GetObjectProperty(isolate, super_class,
                                      isolate->factory()->prototype_string()),
           Object);
-      if (!prototype_parent->IsNull() && !prototype_parent->IsJSReceiver()) {
+      if (!prototype_parent->IsNull(isolate) &&
+          !prototype_parent->IsJSReceiver()) {
         THROW_NEW_ERROR(
             isolate, NewTypeError(MessageTemplate::kPrototypeParentNotAnObject,
                                   prototype_parent),
@@ -114,10 +110,10 @@
       }
       constructor_parent = super_class;
     } else {
-      THROW_NEW_ERROR(
-          isolate,
-          NewTypeError(MessageTemplate::kExtendsValueNotFunction, super_class),
-          Object);
+      THROW_NEW_ERROR(isolate,
+                      NewTypeError(MessageTemplate::kExtendsValueNotConstructor,
+                                   super_class),
+                      Object);
     }
   }
 
@@ -128,7 +124,7 @@
   map->SetConstructor(*constructor);
   Handle<JSObject> prototype = isolate->factory()->NewJSObjectFromMap(map);
 
-  if (!super_class->IsTheHole()) {
+  if (!super_class->IsTheHole(isolate)) {
     // Derived classes, just like builtins, don't create implicit receivers in
     // [[construct]]. Instead they just set up new.target and call into the
     // constructor. Hence we can reuse the builtins construct stub for derived
@@ -186,11 +182,9 @@
   CONVERT_SMI_ARG_CHECKED(start_position, 2);
   CONVERT_SMI_ARG_CHECKED(end_position, 3);
 
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, DefineClass(isolate, super_class, constructor,
-                                   start_position, end_position));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, DefineClass(isolate, super_class, constructor, start_position,
+                           end_position));
 }
 
 
@@ -247,10 +241,8 @@
   CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 2);
 
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, LoadFromSuper(isolate, receiver, home_object, name));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           LoadFromSuper(isolate, receiver, home_object, name));
 }
 
 
@@ -262,13 +254,10 @@
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 2);
 
   uint32_t index = 0;
-  Handle<Object> result;
 
   if (key->ToArrayIndex(&index)) {
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result,
-        LoadElementFromSuper(isolate, receiver, home_object, index));
-    return *result;
+    RETURN_RESULT_OR_FAILURE(
+        isolate, LoadElementFromSuper(isolate, receiver, home_object, index));
   }
 
   Handle<Name> name;
@@ -276,14 +265,11 @@
                                      Object::ToName(isolate, key));
   // TODO(verwaest): Unify using LookupIterator.
   if (name->AsArrayIndex(&index)) {
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result,
-        LoadElementFromSuper(isolate, receiver, home_object, index));
-    return *result;
+    RETURN_RESULT_OR_FAILURE(
+        isolate, LoadElementFromSuper(isolate, receiver, home_object, index));
   }
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, LoadFromSuper(isolate, receiver, home_object, name));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           LoadFromSuper(isolate, receiver, home_object, name));
 }
 
 
diff --git a/src/runtime/runtime-collections.cc b/src/runtime/runtime-collections.cc
index 65690df..b25a5ef 100644
--- a/src/runtime/runtime-collections.cc
+++ b/src/runtime/runtime-collections.cc
@@ -31,7 +31,7 @@
   SealHandleScope shs(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(JSObject, object, 0);
-  RUNTIME_ASSERT(object->IsJSSet() || object->IsJSMap());
+  CHECK(object->IsJSSet() || object->IsJSMap());
   return static_cast<JSCollection*>(object)->table();
 }
 
@@ -40,8 +40,8 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
-  Handle<Smi> hash = Object::GetOrCreateHash(isolate, object);
-  return *hash;
+  Smi* hash = Object::GetOrCreateHash(isolate, object);
+  return hash;
 }
 
 
@@ -91,8 +91,8 @@
   CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSSet, set, 1);
   CONVERT_SMI_ARG_CHECKED(kind, 2)
-  RUNTIME_ASSERT(kind == JSSetIterator::kKindValues ||
-                 kind == JSSetIterator::kKindEntries);
+  CHECK(kind == JSSetIterator::kKindValues ||
+        kind == JSSetIterator::kKindEntries);
   Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table()));
   holder->set_table(*table);
   holder->set_index(Smi::FromInt(0));
@@ -186,9 +186,9 @@
   CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSMap, map, 1);
   CONVERT_SMI_ARG_CHECKED(kind, 2)
-  RUNTIME_ASSERT(kind == JSMapIterator::kKindKeys ||
-                 kind == JSMapIterator::kKindValues ||
-                 kind == JSMapIterator::kKindEntries);
+  CHECK(kind == JSMapIterator::kKindKeys ||
+        kind == JSMapIterator::kKindValues ||
+        kind == JSMapIterator::kKindEntries);
   Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table()));
   holder->set_table(*table);
   holder->set_index(Smi::FromInt(0));
@@ -232,7 +232,7 @@
   DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
   CONVERT_NUMBER_CHECKED(int, max_entries, Int32, args[1]);
-  RUNTIME_ASSERT(max_entries >= 0);
+  CHECK(max_entries >= 0);
 
   Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
   if (max_entries == 0 || max_entries > table->NumberOfElements()) {
@@ -250,7 +250,7 @@
     int count = 0;
     for (int i = 0; count / 2 < max_entries && i < table->Capacity(); i++) {
       Handle<Object> key(table->KeyAt(i), isolate);
-      if (table->IsKey(*key)) {
+      if (table->IsKey(isolate, *key)) {
         entries->set(count++, *key);
         Object* value = table->Lookup(key);
         entries->set(count++, value);
@@ -286,12 +286,13 @@
   CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   CONVERT_SMI_ARG_CHECKED(hash, 2)
-  RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
+  CHECK(key->IsJSReceiver() || key->IsSymbol());
   Handle<ObjectHashTable> table(
       ObjectHashTable::cast(weak_collection->table()));
-  RUNTIME_ASSERT(table->IsKey(*key));
+  CHECK(table->IsKey(isolate, *key));
   Handle<Object> lookup(table->Lookup(key, hash), isolate);
-  return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup;
+  return lookup->IsTheHole(isolate) ? isolate->heap()->undefined_value()
+                                    : *lookup;
 }
 
 
@@ -301,12 +302,12 @@
   CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   CONVERT_SMI_ARG_CHECKED(hash, 2)
-  RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
+  CHECK(key->IsJSReceiver() || key->IsSymbol());
   Handle<ObjectHashTable> table(
       ObjectHashTable::cast(weak_collection->table()));
-  RUNTIME_ASSERT(table->IsKey(*key));
+  CHECK(table->IsKey(isolate, *key));
   Handle<Object> lookup(table->Lookup(key, hash), isolate);
-  return isolate->heap()->ToBoolean(!lookup->IsTheHole());
+  return isolate->heap()->ToBoolean(!lookup->IsTheHole(isolate));
 }
 
 
@@ -316,10 +317,10 @@
   CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   CONVERT_SMI_ARG_CHECKED(hash, 2)
-  RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
+  CHECK(key->IsJSReceiver() || key->IsSymbol());
   Handle<ObjectHashTable> table(
       ObjectHashTable::cast(weak_collection->table()));
-  RUNTIME_ASSERT(table->IsKey(*key));
+  CHECK(table->IsKey(isolate, *key));
   bool was_present = JSWeakCollection::Delete(weak_collection, key, hash);
   return isolate->heap()->ToBoolean(was_present);
 }
@@ -330,12 +331,12 @@
   DCHECK(args.length() == 4);
   CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
-  RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
+  CHECK(key->IsJSReceiver() || key->IsSymbol());
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
   CONVERT_SMI_ARG_CHECKED(hash, 3)
   Handle<ObjectHashTable> table(
       ObjectHashTable::cast(weak_collection->table()));
-  RUNTIME_ASSERT(table->IsKey(*key));
+  CHECK(table->IsKey(isolate, *key));
   JSWeakCollection::Set(weak_collection, key, value, hash);
   return *weak_collection;
 }
@@ -346,7 +347,7 @@
   DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
   CONVERT_NUMBER_CHECKED(int, max_values, Int32, args[1]);
-  RUNTIME_ASSERT(max_values >= 0);
+  CHECK(max_values >= 0);
 
   Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
   if (max_values == 0 || max_values > table->NumberOfElements()) {
@@ -361,8 +362,8 @@
     DisallowHeapAllocation no_gc;
     int count = 0;
     for (int i = 0; count < max_values && i < table->Capacity(); i++) {
-      Handle<Object> key(table->KeyAt(i), isolate);
-      if (table->IsKey(*key)) values->set(count++, *key);
+      Object* key = table->KeyAt(i);
+      if (table->IsKey(isolate, key)) values->set(count++, key);
     }
     DCHECK_EQ(max_values, count);
   }
diff --git a/src/runtime/runtime-compiler.cc b/src/runtime/runtime-compiler.cc
index c8fc9e8..c095045 100644
--- a/src/runtime/runtime-compiler.cc
+++ b/src/runtime/runtime-compiler.cc
@@ -202,7 +202,7 @@
   // We're not prepared to handle a function with arguments object.
   DCHECK(!function->shared()->uses_arguments());
 
-  RUNTIME_ASSERT(FLAG_use_osr);
+  CHECK(FLAG_use_osr);
 
   // Passing the PC in the javascript frame from the caller directly is
   // not GC safe, so we walk the stack to get it.
@@ -303,7 +303,7 @@
 
 bool CodeGenerationFromStringsAllowed(Isolate* isolate,
                                       Handle<Context> context) {
-  DCHECK(context->allow_code_gen_from_strings()->IsFalse());
+  DCHECK(context->allow_code_gen_from_strings()->IsFalse(isolate));
   // Check with callback if set.
   AllowCodeGenerationFromStringsCallback callback =
       isolate->allow_code_gen_callback();
@@ -326,7 +326,7 @@
 
   // Check if native context allows code generation from
   // strings. Throw an exception if it doesn't.
-  if (native_context->allow_code_gen_from_strings()->IsFalse() &&
+  if (native_context->allow_code_gen_from_strings()->IsFalse(isolate) &&
       !CodeGenerationFromStringsAllowed(isolate, native_context)) {
     Handle<Object> error_message =
         native_context->ErrorMessageForCodeGenerationFromStrings();
diff --git a/src/runtime/runtime-debug.cc b/src/runtime/runtime-debug.cc
index e3f3beb..b3be8f7 100644
--- a/src/runtime/runtime-debug.cc
+++ b/src/runtime/runtime-debug.cc
@@ -76,8 +76,8 @@
 RUNTIME_FUNCTION(Runtime_SetDebugEventListener) {
   SealHandleScope shs(isolate);
   DCHECK(args.length() == 2);
-  RUNTIME_ASSERT(args[0]->IsJSFunction() || args[0]->IsUndefined() ||
-                 args[0]->IsNull());
+  RUNTIME_ASSERT(args[0]->IsJSFunction() || args[0]->IsUndefined(isolate) ||
+                 args[0]->IsNull(isolate));
   CONVERT_ARG_HANDLE_CHECKED(Object, callback, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, data, 1);
   isolate->debug()->SetEventListener(callback, data);
@@ -145,7 +145,7 @@
     Isolate* isolate, Handle<IteratorType> object) {
   Factory* factory = isolate->factory();
   Handle<IteratorType> iterator = Handle<IteratorType>::cast(object);
-  RUNTIME_ASSERT_HANDLIFIED(iterator->kind()->IsSmi(), JSArray);
+  CHECK(iterator->kind()->IsSmi());
   const char* kind = NULL;
   switch (Smi::cast(iterator->kind())->value()) {
     case IteratorType::kKindKeys:
@@ -158,7 +158,7 @@
       kind = "entries";
       break;
     default:
-      RUNTIME_ASSERT_HANDLIFIED(false, JSArray);
+      UNREACHABLE();
   }
 
   Handle<FixedArray> result = factory->NewFixedArray(2 * 3);
@@ -243,12 +243,12 @@
     result->set(4, *receiver);
     result->set(5, generator->receiver());
     return factory->NewJSArrayWithElements(result);
-  } else if (Object::IsPromise(object)) {
+  } else if (object->IsJSPromise()) {
     Handle<JSObject> promise = Handle<JSObject>::cast(object);
 
     Handle<Object> status_obj =
         DebugGetProperty(promise, isolate->factory()->promise_state_symbol());
-    RUNTIME_ASSERT_HANDLIFIED(status_obj->IsSmi(), JSArray);
+    CHECK(status_obj->IsSmi());
     const char* status = "rejected";
     int status_val = Handle<Smi>::cast(status_obj)->value();
     switch (status_val) {
@@ -313,10 +313,8 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
-  Handle<JSArray> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Runtime::GetInternalProperties(isolate, obj));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           Runtime::GetInternalProperties(isolate, obj));
 }
 
 
@@ -564,10 +562,13 @@
     // Use the value from the stack.
     if (ScopeInfo::VariableIsSynthetic(scope_info->LocalName(i))) continue;
     locals->set(local * 2, scope_info->LocalName(i));
-    Handle<Object> value = frame_inspector.GetExpression(i);
+    Handle<Object> value =
+        frame_inspector.GetExpression(scope_info->StackLocalIndex(i));
     // TODO(yangguo): We convert optimized out values to {undefined} when they
     // are passed to the debugger. Eventually we should handle them somehow.
-    if (value->IsOptimizedOut()) value = isolate->factory()->undefined_value();
+    if (value->IsOptimizedOut(isolate)) {
+      value = isolate->factory()->undefined_value();
+    }
     locals->set(local * 2 + 1, *value);
     local++;
   }
@@ -764,10 +765,7 @@
   if (it.Done()) {
     return isolate->heap()->undefined_value();
   }
-  Handle<JSObject> details;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, details,
-                                     it.MaterializeScopeDetails());
-  return *details;
+  RETURN_RESULT_OR_FAILURE(isolate, it.MaterializeScopeDetails());
 }
 
 
@@ -856,10 +854,7 @@
     return isolate->heap()->undefined_value();
   }
 
-  Handle<JSObject> details;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, details,
-                                     it.MaterializeScopeDetails());
-  return *details;
+  RETURN_RESULT_OR_FAILURE(isolate, it.MaterializeScopeDetails());
 }
 
 
@@ -971,7 +966,9 @@
   // Find the number of break points
   Handle<Object> break_locations =
       Debug::GetSourceBreakLocations(shared, alignment);
-  if (break_locations->IsUndefined()) return isolate->heap()->undefined_value();
+  if (break_locations->IsUndefined(isolate)) {
+    return isolate->heap()->undefined_value();
+  }
   // Return array as JS array
   return *isolate->factory()->NewJSArrayWithElements(
       Handle<FixedArray>::cast(break_locations));
@@ -1141,12 +1138,9 @@
 
   StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
 
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      DebugEvaluate::Local(isolate, id, inlined_jsframe_index, source,
-                           disable_break, context_extension));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, DebugEvaluate::Local(isolate, id, inlined_jsframe_index, source,
+                                    disable_break, context_extension));
 }
 
 
@@ -1163,11 +1157,9 @@
   CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 2);
   CONVERT_ARG_HANDLE_CHECKED(HeapObject, context_extension, 3);
 
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
+  RETURN_RESULT_OR_FAILURE(
+      isolate,
       DebugEvaluate::Global(isolate, source, disable_break, context_extension));
-  return *result;
 }
 
 
@@ -1206,7 +1198,7 @@
 static bool HasInPrototypeChainIgnoringProxies(Isolate* isolate,
                                                JSObject* object,
                                                Object* proto) {
-  PrototypeIterator iter(isolate, object, PrototypeIterator::START_AT_RECEIVER);
+  PrototypeIterator iter(isolate, object, kStartAtReceiver);
   while (true) {
     iter.AdvanceIgnoringProxies();
     if (iter.IsAtEnd()) return false;
@@ -1224,7 +1216,7 @@
   DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, target, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, filter, 1);
-  RUNTIME_ASSERT(filter->IsUndefined() || filter->IsJSObject());
+  RUNTIME_ASSERT(filter->IsUndefined(isolate) || filter->IsJSObject());
   CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]);
   RUNTIME_ASSERT(max_references >= 0);
 
@@ -1243,7 +1235,7 @@
       if (!obj->ReferencesObject(*target)) continue;
       // Check filter if supplied. This is normally used to avoid
       // references from mirror objects.
-      if (!filter->IsUndefined() &&
+      if (!filter->IsUndefined(isolate) &&
           HasInPrototypeChainIgnoringProxies(isolate, obj, *filter)) {
         continue;
       }
@@ -1313,12 +1305,9 @@
   HandleScope shs(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
-  Handle<Object> prototype;
   // TODO(1543): Come up with a solution for clients to handle potential errors
   // thrown by an intermediate proxy.
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, prototype,
-                                     JSReceiver::GetPrototype(isolate, obj));
-  return *prototype;
+  RETURN_RESULT_OR_FAILURE(isolate, JSReceiver::GetPrototype(isolate, obj));
 }
 
 
@@ -1359,15 +1348,13 @@
 
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
 
-  Handle<Object> name;
   if (function->IsJSBoundFunction()) {
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, name, JSBoundFunction::GetName(
-                           isolate, Handle<JSBoundFunction>::cast(function)));
+    RETURN_RESULT_OR_FAILURE(
+        isolate, JSBoundFunction::GetName(
+                     isolate, Handle<JSBoundFunction>::cast(function)));
   } else {
-    name = JSFunction::GetDebugName(Handle<JSFunction>::cast(function));
+    return *JSFunction::GetDebugName(Handle<JSFunction>::cast(function));
   }
-  return *name;
 }
 
 
@@ -1423,12 +1410,9 @@
     return isolate->heap()->exception();
   }
 
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      Execution::Call(isolate, function, handle(function->global_proxy()), 0,
-                      NULL));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, Execution::Call(isolate, function,
+                               handle(function->global_proxy()), 0, NULL));
 }
 
 
@@ -1501,6 +1485,212 @@
   return *Script::GetWrapper(found);
 }
 
+RUNTIME_FUNCTION(Runtime_ScriptLineCount) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSValue, script, 0);
+
+  RUNTIME_ASSERT(script->value()->IsScript());
+  Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
+
+  Script::InitLineEnds(script_handle);
+
+  FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
+  return Smi::FromInt(line_ends_array->length());
+}
+
+RUNTIME_FUNCTION(Runtime_ScriptLineStartPosition) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_ARG_CHECKED(JSValue, script, 0);
+  CONVERT_NUMBER_CHECKED(int32_t, line, Int32, args[1]);
+
+  RUNTIME_ASSERT(script->value()->IsScript());
+  Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
+
+  Script::InitLineEnds(script_handle);
+
+  FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
+  const int line_count = line_ends_array->length();
+
+  // If line == line_count, we return the first position beyond the last line.
+  if (line < 0 || line > line_count) {
+    return Smi::FromInt(-1);
+  } else if (line == 0) {
+    return Smi::FromInt(0);
+  } else {
+    DCHECK(0 < line && line <= line_count);
+    const int pos = Smi::cast(line_ends_array->get(line - 1))->value() + 1;
+    return Smi::FromInt(pos);
+  }
+}
+
+RUNTIME_FUNCTION(Runtime_ScriptLineEndPosition) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_ARG_CHECKED(JSValue, script, 0);
+  CONVERT_NUMBER_CHECKED(int32_t, line, Int32, args[1]);
+
+  RUNTIME_ASSERT(script->value()->IsScript());
+  Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
+
+  Script::InitLineEnds(script_handle);
+
+  FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
+  const int line_count = line_ends_array->length();
+
+  if (line < 0 || line >= line_count) {
+    return Smi::FromInt(-1);
+  } else {
+    return Smi::cast(line_ends_array->get(line));
+  }
+}
+
+static Handle<Object> GetJSPositionInfo(Handle<Script> script, int position,
+                                        Script::OffsetFlag offset_flag,
+                                        Isolate* isolate) {
+  Script::PositionInfo info;
+  if (!script->GetPositionInfo(position, &info, offset_flag)) {
+    return handle(isolate->heap()->null_value(), isolate);
+  }
+
+  Handle<String> source = handle(String::cast(script->source()), isolate);
+  Handle<String> sourceText =
+      isolate->factory()->NewSubString(source, info.line_start, info.line_end);
+
+  Handle<JSObject> jsinfo =
+      isolate->factory()->NewJSObject(isolate->object_function());
+
+  JSObject::AddProperty(jsinfo, isolate->factory()->script_string(), script,
+                        NONE);
+  JSObject::AddProperty(jsinfo, isolate->factory()->position_string(),
+                        handle(Smi::FromInt(position), isolate), NONE);
+  JSObject::AddProperty(jsinfo, isolate->factory()->line_string(),
+                        handle(Smi::FromInt(info.line), isolate), NONE);
+  JSObject::AddProperty(jsinfo, isolate->factory()->column_string(),
+                        handle(Smi::FromInt(info.column), isolate), NONE);
+  JSObject::AddProperty(jsinfo, isolate->factory()->sourceText_string(),
+                        sourceText, NONE);
+
+  return jsinfo;
+}
+
+// Get information on a specific source line and column possibly offset by a
+// fixed source position. This function is used to find a source position from
+// a line and column position. The fixed source position offset is typically
+// used to find a source position in a function based on a line and column in
+// the source for the function alone. The offset passed will then be the
+// start position of the source for the function within the full script source.
+// Note that incoming line and column parameters may be undefined, and are
+// assumed to be passed *with* offsets.
+RUNTIME_FUNCTION(Runtime_ScriptLocationFromLine) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 4);
+  CONVERT_ARG_CHECKED(JSValue, script, 0);
+
+  RUNTIME_ASSERT(script->value()->IsScript());
+  Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
+
+  // Line and column are possibly undefined and we need to handle these cases,
+  // additionally subtracting corresponding offsets.
+
+  int32_t line;
+  if (args[1]->IsNull(isolate) || args[1]->IsUndefined(isolate)) {
+    line = 0;
+  } else {
+    RUNTIME_ASSERT(args[1]->IsNumber());
+    line = NumberToInt32(args[1]) - script_handle->line_offset();
+  }
+
+  int32_t column;
+  if (args[2]->IsNull(isolate) || args[2]->IsUndefined(isolate)) {
+    column = 0;
+  } else {
+    RUNTIME_ASSERT(args[2]->IsNumber());
+    column = NumberToInt32(args[2]);
+    if (line == 0) column -= script_handle->column_offset();
+  }
+
+  CONVERT_NUMBER_CHECKED(int32_t, offset_position, Int32, args[3]);
+
+  if (line < 0 || column < 0 || offset_position < 0) {
+    return isolate->heap()->null_value();
+  }
+
+  Script::InitLineEnds(script_handle);
+
+  FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
+  const int line_count = line_ends_array->length();
+
+  int position;
+  if (line == 0) {
+    position = offset_position + column;
+  } else {
+    Script::PositionInfo info;
+    if (!script_handle->GetPositionInfo(offset_position, &info,
+                                        Script::NO_OFFSET) ||
+        info.line + line >= line_count) {
+      return isolate->heap()->null_value();
+    }
+
+    const int offset_line = info.line + line;
+    const int offset_line_position =
+        (offset_line == 0)
+            ? 0
+            : Smi::cast(line_ends_array->get(offset_line - 1))->value() + 1;
+    position = offset_line_position + column;
+  }
+
+  return *GetJSPositionInfo(script_handle, position, Script::NO_OFFSET,
+                            isolate);
+}
+
+RUNTIME_FUNCTION(Runtime_ScriptPositionInfo) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+  CONVERT_ARG_CHECKED(JSValue, script, 0);
+  CONVERT_NUMBER_CHECKED(int32_t, position, Int32, args[1]);
+  CONVERT_BOOLEAN_ARG_CHECKED(with_offset, 2);
+
+  RUNTIME_ASSERT(script->value()->IsScript());
+  Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
+
+  const Script::OffsetFlag offset_flag =
+      with_offset ? Script::WITH_OFFSET : Script::NO_OFFSET;
+  return *GetJSPositionInfo(script_handle, position, offset_flag, isolate);
+}
+
+// Returns the given line as a string, or null if line is out of bounds.
+// The parameter line is expected to include the script's line offset.
+RUNTIME_FUNCTION(Runtime_ScriptSourceLine) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_ARG_CHECKED(JSValue, script, 0);
+  CONVERT_NUMBER_CHECKED(int32_t, line, Int32, args[1]);
+
+  RUNTIME_ASSERT(script->value()->IsScript());
+  Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
+
+  Script::InitLineEnds(script_handle);
+
+  FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
+  const int line_count = line_ends_array->length();
+
+  line -= script_handle->line_offset();
+  if (line < 0 || line_count <= line) {
+    return isolate->heap()->null_value();
+  }
+
+  const int start =
+      (line == 0) ? 0 : Smi::cast(line_ends_array->get(line - 1))->value() + 1;
+  const int end = Smi::cast(line_ends_array->get(line))->value();
+
+  Handle<String> source =
+      handle(String::cast(script_handle->source()), isolate);
+  Handle<String> str = isolate->factory()->NewSubString(source, start, end);
+
+  return *str;
+}
 
 // Set one shot breakpoints for the callback function that is passed to a
 // built-in function such as Array.forEach to enable stepping into the callback,
@@ -1513,6 +1703,22 @@
   return isolate->heap()->undefined_value();
 }
 
+// Set one shot breakpoints for the suspended generator object.
+RUNTIME_FUNCTION(Runtime_DebugPrepareStepInSuspendedGenerator) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(0, args.length());
+  isolate->debug()->PrepareStepInSuspendedGenerator();
+  return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_DebugRecordAsyncFunction) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+  CHECK(isolate->debug()->last_step_action() >= StepNext);
+  isolate->debug()->RecordAsyncFunction(generator);
+  return isolate->heap()->undefined_value();
+}
 
 RUNTIME_FUNCTION(Runtime_DebugPushPromise) {
   DCHECK(args.length() == 2);
@@ -1520,8 +1726,6 @@
   CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 1);
   isolate->PushPromise(promise, function);
-  // If we are in step-in mode, flood the handler.
-  isolate->debug()->EnableStepIn();
   return isolate->heap()->undefined_value();
 }
 
diff --git a/src/runtime/runtime-forin.cc b/src/runtime/runtime-forin.cc
index 4b558d1..e57016a 100644
--- a/src/runtime/runtime-forin.cc
+++ b/src/runtime/runtime-forin.cc
@@ -22,14 +22,18 @@
 // deletions during a for-in.
 MaybeHandle<HeapObject> Enumerate(Handle<JSReceiver> receiver) {
   Isolate* const isolate = receiver->GetIsolate();
-  FastKeyAccumulator accumulator(isolate, receiver, INCLUDE_PROTOS,
+  JSObject::MakePrototypesFast(receiver, kStartAtReceiver, isolate);
+  FastKeyAccumulator accumulator(isolate, receiver,
+                                 KeyCollectionMode::kIncludePrototypes,
                                  ENUMERABLE_STRINGS);
   accumulator.set_filter_proxy_keys(false);
+  accumulator.set_is_for_in(true);
   // Test if we have an enum cache for {receiver}.
   if (!accumulator.is_receiver_simple_enum()) {
     Handle<FixedArray> keys;
-    ASSIGN_RETURN_ON_EXCEPTION(isolate, keys, accumulator.GetKeys(KEEP_NUMBERS),
-                               HeapObject);
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate, keys, accumulator.GetKeys(GetKeysConversion::kKeepNumbers),
+        HeapObject);
     // Test again, since cache may have been built by GetKeys() calls above.
     if (!accumulator.is_receiver_simple_enum()) return keys;
   }
@@ -61,7 +65,7 @@
           Handle<Object> prototype;
           ASSIGN_RETURN_ON_EXCEPTION(isolate, prototype,
                                      JSProxy::GetPrototype(proxy), Object);
-          if (prototype->IsNull()) break;
+          if (prototype->IsNull(isolate)) break;
           // We already have a stack-check in JSProxy::GetPrototype.
           return HasEnumerableProperty(
               isolate, Handle<JSReceiver>::cast(prototype), key);
@@ -107,9 +111,7 @@
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
-  Handle<HeapObject> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Enumerate(receiver));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, Enumerate(receiver));
 }
 
 
@@ -159,9 +161,7 @@
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Filter(receiver, key));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, Filter(receiver, key));
 }
 
 
@@ -177,9 +177,7 @@
   if (receiver->map() == *cache_type) {
     return *key;
   }
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Filter(receiver, key));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, Filter(receiver, key));
 }
 
 
diff --git a/src/runtime/runtime-function.cc b/src/runtime/runtime-function.cc
index 56cf3b6..3a66869 100644
--- a/src/runtime/runtime-function.cc
+++ b/src/runtime/runtime-function.cc
@@ -10,7 +10,6 @@
 #include "src/frames-inl.h"
 #include "src/isolate-inl.h"
 #include "src/messages.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/wasm/wasm-module.h"
 
 namespace v8 {
@@ -21,15 +20,13 @@
   DCHECK(args.length() == 1);
 
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
-  Handle<Object> result;
   if (function->IsJSBoundFunction()) {
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result, JSBoundFunction::GetName(
-                             isolate, Handle<JSBoundFunction>::cast(function)));
+    RETURN_RESULT_OR_FAILURE(
+        isolate, JSBoundFunction::GetName(
+                     isolate, Handle<JSBoundFunction>::cast(function)));
   } else {
-    result = JSFunction::GetName(isolate, Handle<JSFunction>::cast(function));
+    return *JSFunction::GetName(isolate, Handle<JSFunction>::cast(function));
   }
-  return *result;
 }
 
 
@@ -51,7 +48,7 @@
   DCHECK(args.length() == 1);
 
   CONVERT_ARG_CHECKED(JSFunction, f, 0);
-  RUNTIME_ASSERT(f->RemovePrototype());
+  CHECK(f->RemovePrototype());
   f->shared()->set_construct_stub(
       *isolate->builtins()->ConstructedNonConstructable());
 
@@ -131,8 +128,7 @@
 
   CONVERT_ARG_CHECKED(JSFunction, fun, 0);
   CONVERT_SMI_ARG_CHECKED(length, 1);
-  RUNTIME_ASSERT((length & 0xC0000000) == 0xC0000000 ||
-                 (length & 0xC0000000) == 0x0);
+  CHECK((length & 0xC0000000) == 0xC0000000 || (length & 0xC0000000) == 0x0);
   fun->shared()->set_length(length);
   return isolate->heap()->undefined_value();
 }
@@ -144,7 +140,7 @@
 
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
-  RUNTIME_ASSERT(fun->IsConstructor());
+  CHECK(fun->IsConstructor());
   RETURN_FAILURE_ON_EXCEPTION(isolate,
                               Accessors::FunctionSetPrototype(fun, value));
   return args[0];  // return TOS
@@ -189,7 +185,8 @@
   }
   target_shared->set_scope_info(source_shared->scope_info());
   target_shared->set_length(source_shared->length());
-  target_shared->set_feedback_vector(source_shared->feedback_vector());
+  target_shared->set_num_literals(source_shared->num_literals());
+  target_shared->set_feedback_metadata(source_shared->feedback_metadata());
   target_shared->set_internal_formal_parameter_count(
       source_shared->internal_formal_parameter_count());
   target_shared->set_start_position_and_type(
@@ -206,21 +203,17 @@
 
   // Set the code of the target function.
   target->ReplaceCode(source_shared->code());
-  DCHECK(target->next_function_link()->IsUndefined());
+  DCHECK(target->next_function_link()->IsUndefined(isolate));
 
-  // Make sure we get a fresh copy of the literal vector to avoid cross
-  // context contamination.
   Handle<Context> context(source->context());
   target->set_context(*context);
 
-  int number_of_literals = source->NumberOfLiterals();
-  Handle<LiteralsArray> literals =
-      LiteralsArray::New(isolate, handle(target_shared->feedback_vector()),
-                         number_of_literals, TENURED);
-  target->set_literals(*literals);
+  // Make sure we get a fresh copy of the literal vector to avoid cross
+  // context contamination, and that the literal vector makes it's way into
+  // the target_shared optimized code map.
+  JSFunction::EnsureLiterals(target);
 
-  if (isolate->logger()->is_logging_code_events() ||
-      isolate->cpu_profiler()->is_profiling()) {
+  if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
     isolate->logger()->LogExistingFunction(
         source_shared, Handle<AbstractCode>(source_shared->abstract_code()));
   }
@@ -234,7 +227,7 @@
 // into the global object when doing call and apply.
 RUNTIME_FUNCTION(Runtime_SetNativeFlag) {
   SealHandleScope shs(isolate);
-  RUNTIME_ASSERT(args.length() == 1);
+  DCHECK_EQ(1, args.length());
 
   CONVERT_ARG_CHECKED(Object, object, 0);
 
@@ -255,7 +248,7 @@
 
 RUNTIME_FUNCTION(Runtime_SetForceInlineFlag) {
   SealHandleScope shs(isolate);
-  RUNTIME_ASSERT(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
 
   if (object->IsJSFunction()) {
@@ -276,11 +269,8 @@
   for (int i = 0; i < argc; ++i) {
     argv[i] = args.at<Object>(2 + i);
   }
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      Execution::Call(isolate, target, receiver, argc, argv.start()));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, Execution::Call(isolate, target, receiver, argc, argv.start()));
 }
 
 
@@ -311,15 +301,5 @@
              : *JSFunction::ToString(Handle<JSFunction>::cast(function));
 }
 
-RUNTIME_FUNCTION(Runtime_WasmGetFunctionName) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, wasm, 0);
-  CONVERT_SMI_ARG_CHECKED(func_index, 1);
-
-  return *wasm::GetWasmFunctionName(wasm, func_index);
-}
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-futex.cc b/src/runtime/runtime-futex.cc
index f4ef679..a966412 100644
--- a/src/runtime/runtime-futex.cc
+++ b/src/runtime/runtime-futex.cc
@@ -24,10 +24,10 @@
   CONVERT_SIZE_ARG_CHECKED(index, 1);
   CONVERT_INT32_ARG_CHECKED(value, 2);
   CONVERT_DOUBLE_ARG_CHECKED(timeout, 3);
-  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
-  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
-  RUNTIME_ASSERT(sta->type() == kExternalInt32Array);
-  RUNTIME_ASSERT(timeout == V8_INFINITY || !std::isnan(timeout));
+  CHECK(sta->GetBuffer()->is_shared());
+  CHECK_LT(index, NumberToSize(isolate, sta->length()));
+  CHECK_EQ(sta->type(), kExternalInt32Array);
+  CHECK(timeout == V8_INFINITY || !std::isnan(timeout));
 
   Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
   size_t addr = (index << 2) + NumberToSize(isolate, sta->byte_offset());
@@ -42,9 +42,9 @@
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
   CONVERT_SIZE_ARG_CHECKED(index, 1);
   CONVERT_INT32_ARG_CHECKED(count, 2);
-  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
-  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
-  RUNTIME_ASSERT(sta->type() == kExternalInt32Array);
+  CHECK(sta->GetBuffer()->is_shared());
+  CHECK_LT(index, NumberToSize(isolate, sta->length()));
+  CHECK_EQ(sta->type(), kExternalInt32Array);
 
   Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
   size_t addr = (index << 2) + NumberToSize(isolate, sta->byte_offset());
@@ -61,10 +61,10 @@
   CONVERT_INT32_ARG_CHECKED(count, 2);
   CONVERT_INT32_ARG_CHECKED(value, 3);
   CONVERT_SIZE_ARG_CHECKED(index2, 4);
-  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
-  RUNTIME_ASSERT(index1 < NumberToSize(isolate, sta->length()));
-  RUNTIME_ASSERT(index2 < NumberToSize(isolate, sta->length()));
-  RUNTIME_ASSERT(sta->type() == kExternalInt32Array);
+  CHECK(sta->GetBuffer()->is_shared());
+  CHECK_LT(index1, NumberToSize(isolate, sta->length()));
+  CHECK_LT(index2, NumberToSize(isolate, sta->length()));
+  CHECK_EQ(sta->type(), kExternalInt32Array);
 
   Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
   size_t addr1 = (index1 << 2) + NumberToSize(isolate, sta->byte_offset());
@@ -80,9 +80,9 @@
   DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
   CONVERT_SIZE_ARG_CHECKED(index, 1);
-  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
-  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
-  RUNTIME_ASSERT(sta->type() == kExternalInt32Array);
+  CHECK(sta->GetBuffer()->is_shared());
+  CHECK_LT(index, NumberToSize(isolate, sta->length()));
+  CHECK_EQ(sta->type(), kExternalInt32Array);
 
   Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
   size_t addr = (index << 2) + NumberToSize(isolate, sta->byte_offset());
diff --git a/src/runtime/runtime-generator.cc b/src/runtime/runtime-generator.cc
index 7ff7fc8..3b65682 100644
--- a/src/runtime/runtime-generator.cc
+++ b/src/runtime/runtime-generator.cc
@@ -5,6 +5,7 @@
 #include "src/runtime/runtime-utils.h"
 
 #include "src/arguments.h"
+#include "src/debug/debug.h"
 #include "src/factory.h"
 #include "src/frames-inl.h"
 #include "src/objects-inl.h"
@@ -17,14 +18,15 @@
   DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
-  RUNTIME_ASSERT(function->shared()->is_resumable());
+  CHECK(function->shared()->is_resumable());
 
   Handle<FixedArray> operand_stack;
-  if (FLAG_ignition && FLAG_ignition_generators) {
+  if (function->shared()->HasBytecodeArray()) {
+    // New-style generators.
     int size = function->shared()->bytecode_array()->register_count();
     operand_stack = isolate->factory()->NewFixedArray(size);
   } else {
-    DCHECK(!function->shared()->HasBytecodeArray());
+    // Old-style generators.
     operand_stack = handle(isolate->heap()->empty_fixed_array());
   }
 
@@ -38,7 +40,6 @@
   return *generator;
 }
 
-
 RUNTIME_FUNCTION(Runtime_SuspendJSGeneratorObject) {
   HandleScope handle_scope(isolate);
   DCHECK(args.length() == 1);
@@ -46,11 +47,13 @@
 
   JavaScriptFrameIterator stack_iterator(isolate);
   JavaScriptFrame* frame = stack_iterator.frame();
-  RUNTIME_ASSERT(frame->function()->shared()->is_resumable());
+  CHECK(frame->function()->shared()->is_resumable());
   DCHECK_EQ(frame->function(), generator_object->function());
   DCHECK(frame->function()->shared()->is_compiled());
   DCHECK(!frame->function()->IsOptimized());
 
+  isolate->debug()->RecordAsyncFunction(generator_object);
+
   // The caller should have saved the context and continuation already.
   DCHECK_EQ(generator_object->context(), Context::cast(frame->context()));
   DCHECK_LT(0, generator_object->continuation());
@@ -77,7 +80,6 @@
   return isolate->heap()->undefined_value();
 }
 
-
 RUNTIME_FUNCTION(Runtime_GeneratorClose) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
@@ -88,8 +90,6 @@
   return isolate->heap()->undefined_value();
 }
 
-
-// Returns function of generator activation.
 RUNTIME_FUNCTION(Runtime_GeneratorGetFunction) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
@@ -98,8 +98,6 @@
   return generator->function();
 }
 
-
-// Returns receiver of generator activation.
 RUNTIME_FUNCTION(Runtime_GeneratorGetReceiver) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
@@ -108,18 +106,14 @@
   return generator->receiver();
 }
 
-
-// Returns input of generator activation.
-RUNTIME_FUNCTION(Runtime_GeneratorGetInput) {
+RUNTIME_FUNCTION(Runtime_GeneratorGetInputOrDebugPos) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
 
-  return generator->input();
+  return generator->input_or_debug_pos();
 }
 
-
-// Returns resume mode of generator activation.
 RUNTIME_FUNCTION(Runtime_GeneratorGetResumeMode) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
@@ -128,17 +122,6 @@
   return Smi::FromInt(generator->resume_mode());
 }
 
-
-RUNTIME_FUNCTION(Runtime_GeneratorSetContext) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
-
-  generator->set_context(isolate->context());
-  return isolate->heap()->undefined_value();
-}
-
-
 RUNTIME_FUNCTION(Runtime_GeneratorGetContinuation) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
@@ -147,59 +130,13 @@
   return Smi::FromInt(generator->continuation());
 }
 
-
-RUNTIME_FUNCTION(Runtime_GeneratorSetContinuation) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
-  CONVERT_SMI_ARG_CHECKED(continuation, 1);
-
-  generator->set_continuation(continuation);
-  return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_GeneratorLoadRegister) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
-  CONVERT_SMI_ARG_CHECKED(index, 1);
-
-  DCHECK(FLAG_ignition && FLAG_ignition_generators);
-  DCHECK(generator->function()->shared()->HasBytecodeArray());
-
-  return generator->operand_stack()->get(index);
-}
-
-
-RUNTIME_FUNCTION(Runtime_GeneratorStoreRegister) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
-  CONVERT_SMI_ARG_CHECKED(index, 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
-
-  DCHECK(FLAG_ignition && FLAG_ignition_generators);
-  DCHECK(generator->function()->shared()->HasBytecodeArray());
-
-  generator->operand_stack()->set(index, *value);
-  return isolate->heap()->undefined_value();
-}
-
-
 RUNTIME_FUNCTION(Runtime_GeneratorGetSourcePosition) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
 
-  if (generator->is_suspended()) {
-    Handle<Code> code(generator->function()->code(), isolate);
-    int offset = generator->continuation();
-    RUNTIME_ASSERT(0 <= offset && offset < code->instruction_size());
-    return Smi::FromInt(code->SourcePosition(offset));
-  }
-
-  return isolate->heap()->undefined_value();
+  if (!generator->is_suspended()) return isolate->heap()->undefined_value();
+  return Smi::FromInt(generator->source_position());
 }
 
 }  // namespace internal
diff --git a/src/runtime/runtime-i18n.cc b/src/runtime/runtime-i18n.cc
index 14974e8..d4c6034 100644
--- a/src/runtime/runtime-i18n.cc
+++ b/src/runtime/runtime-i18n.cc
@@ -259,7 +259,7 @@
 
   Handle<Symbol> marker = isolate->factory()->intl_initialized_marker_symbol();
   Handle<Object> tag = JSReceiver::GetDataProperty(obj, marker);
-  return isolate->heap()->ToBoolean(!tag->IsUndefined());
+  return isolate->heap()->ToBoolean(!tag->IsUndefined(isolate));
 }
 
 
@@ -317,7 +317,7 @@
   Handle<Symbol> marker = isolate->factory()->intl_impl_object_symbol();
 
   Handle<Object> impl = JSReceiver::GetDataProperty(obj, marker);
-  if (impl->IsTheHole()) {
+  if (impl->IsTheHole(isolate)) {
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewTypeError(MessageTemplate::kNotIntlObject, obj));
   }
@@ -382,13 +382,10 @@
   icu::UnicodeString result;
   date_format->format(value->Number(), result);
 
-  Handle<String> result_str;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result_str,
-      isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
-          reinterpret_cast<const uint16_t*>(result.getBuffer()),
-          result.length())));
-  return *result_str;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
+                   reinterpret_cast<const uint16_t*>(result.getBuffer()),
+                   result.length())));
 }
 
 
@@ -410,12 +407,9 @@
   UDate date = date_format->parse(u_date, status);
   if (U_FAILURE(status)) return isolate->heap()->undefined_value();
 
-  Handle<JSDate> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      JSDate::New(isolate->date_function(), isolate->date_function(),
-                  static_cast<double>(date)));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, JSDate::New(isolate->date_function(), isolate->date_function(),
+                           static_cast<double>(date)));
 }
 
 
@@ -476,13 +470,10 @@
   icu::UnicodeString result;
   number_format->format(value->Number(), result);
 
-  Handle<String> result_str;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result_str,
-      isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
-          reinterpret_cast<const uint16_t*>(result.getBuffer()),
-          result.length())));
-  return *result_str;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
+                   reinterpret_cast<const uint16_t*>(result.getBuffer()),
+                   result.length())));
 }
 
 
@@ -647,13 +638,10 @@
     return isolate->heap()->undefined_value();
   }
 
-  Handle<String> result_str;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result_str,
-      isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
-          reinterpret_cast<const uint16_t*>(result.getBuffer()),
-          result.length())));
-  return *result_str;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
+                   reinterpret_cast<const uint16_t*>(result.getBuffer()),
+                   result.length())));
 }
 
 
@@ -848,13 +836,11 @@
       // If no change is made, just return |s|.
       if (converted.getBuffer() == src) return *s;
     }
-    Handle<String> result;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result,
+    RETURN_RESULT_OR_FAILURE(
+        isolate,
         isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
             reinterpret_cast<const uint16_t*>(converted.getBuffer()),
             converted.length())));
-    return *result;
   }
 
   auto case_converter = is_to_upper ? u_strToUpper : u_strToLower;
@@ -1145,6 +1131,23 @@
                            reinterpret_cast<const char*>(lang_str));
 }
 
+RUNTIME_FUNCTION(Runtime_DateCacheVersion) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(0, args.length());
+  if (isolate->serializer_enabled()) return isolate->heap()->undefined_value();
+  if (!isolate->eternal_handles()->Exists(EternalHandles::DATE_CACHE_VERSION)) {
+    Handle<FixedArray> date_cache_version =
+        isolate->factory()->NewFixedArray(1, TENURED);
+    date_cache_version->set(0, Smi::FromInt(0));
+    isolate->eternal_handles()->CreateSingleton(
+        isolate, *date_cache_version, EternalHandles::DATE_CACHE_VERSION);
+  }
+  Handle<FixedArray> date_cache_version =
+      Handle<FixedArray>::cast(isolate->eternal_handles()->GetSingleton(
+          EternalHandles::DATE_CACHE_VERSION));
+  return date_cache_version->get(0);
+}
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/runtime/runtime-internal.cc b/src/runtime/runtime-internal.cc
index f805fdb..e7491ba 100644
--- a/src/runtime/runtime-internal.cc
+++ b/src/runtime/runtime-internal.cc
@@ -13,6 +13,7 @@
 #include "src/isolate-inl.h"
 #include "src/messages.h"
 #include "src/parsing/parser.h"
+#include "src/wasm/wasm-module.h"
 
 namespace v8 {
 namespace internal {
@@ -20,7 +21,7 @@
 RUNTIME_FUNCTION(Runtime_CheckIsBootstrapping) {
   SealHandleScope shs(isolate);
   DCHECK(args.length() == 0);
-  RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
+  CHECK(isolate->bootstrapper()->IsActive());
   return isolate->heap()->undefined_value();
 }
 
@@ -29,7 +30,7 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, container, 0);
-  RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
+  CHECK(isolate->bootstrapper()->IsActive());
   JSObject::NormalizeProperties(container, KEEP_INOBJECT_PROPERTIES, 10,
                                 "ExportFromRuntime");
   Bootstrapper::ExportFromRuntime(isolate, container);
@@ -42,7 +43,7 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, container, 0);
-  RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
+  CHECK(isolate->bootstrapper()->IsActive());
   JSObject::NormalizeProperties(container, KEEP_INOBJECT_PROPERTIES, 10,
                                 "ExportExperimentalFromRuntime");
   Bootstrapper::ExportExperimentalFromRuntime(isolate, container);
@@ -55,21 +56,21 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
-  RUNTIME_ASSERT(array->HasFastElements());
-  RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
+  CHECK(array->HasFastElements());
+  CHECK(isolate->bootstrapper()->IsActive());
   Handle<Context> native_context = isolate->native_context();
   Handle<FixedArray> fixed_array(FixedArray::cast(array->elements()));
   int length = Smi::cast(array->length())->value();
   for (int i = 0; i < length; i += 2) {
-    RUNTIME_ASSERT(fixed_array->get(i)->IsString());
+    CHECK(fixed_array->get(i)->IsString());
     Handle<String> name(String::cast(fixed_array->get(i)));
-    RUNTIME_ASSERT(fixed_array->get(i + 1)->IsJSObject());
+    CHECK(fixed_array->get(i + 1)->IsJSObject());
     Handle<JSObject> object(JSObject::cast(fixed_array->get(i + 1)));
     int index = Context::ImportedFieldIndexForName(name);
     if (index == Context::kNotFound) {
       index = Context::IntrinsicIndexForName(name);
     }
-    RUNTIME_ASSERT(index != Context::kNotFound);
+    CHECK(index != Context::kNotFound);
     native_context->set(index, *object);
   }
   return isolate->heap()->undefined_value();
@@ -140,8 +141,9 @@
                       LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
     if (it.IsFound()) {
       DCHECK(JSReceiver::GetDataProperty(&it)->IsSmi());
+      // Make column number 1-based here.
       Maybe<bool> data_set = JSReceiver::SetDataProperty(
-          &it, handle(Smi::FromInt(byte_offset), isolate));
+          &it, handle(Smi::FromInt(byte_offset + 1), isolate));
       DCHECK(data_set.IsJust() && data_set.FromJust() == true);
       USE(data_set);
     }
@@ -258,7 +260,7 @@
   if (debug_event) isolate->debug()->OnPromiseReject(promise, value);
   Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
   // Do not report if we actually have a handler.
-  if (JSReceiver::GetDataProperty(promise, key)->IsUndefined()) {
+  if (JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate)) {
     isolate->ReportPromiseReject(promise, value,
                                  v8::kPromiseRejectWithNoHandler);
   }
@@ -272,7 +274,7 @@
   CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
   Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
   // At this point, no revocation has been issued before
-  RUNTIME_ASSERT(JSReceiver::GetDataProperty(promise, key)->IsUndefined());
+  CHECK(JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate));
   isolate->ReportPromiseReject(promise, Handle<Object>(),
                                v8::kPromiseHandlerAddedAfterReject);
   return isolate->heap()->undefined_value();
@@ -304,9 +306,9 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_SMI_ARG_CHECKED(size, 0);
-  RUNTIME_ASSERT(IsAligned(size, kPointerSize));
-  RUNTIME_ASSERT(size > 0);
-  RUNTIME_ASSERT(size <= Page::kMaxRegularHeapObjectSize);
+  CHECK(IsAligned(size, kPointerSize));
+  CHECK(size > 0);
+  CHECK(size <= Page::kMaxRegularHeapObjectSize);
   return *isolate->factory()->NewFillerObject(size, false, NEW_SPACE);
 }
 
@@ -316,14 +318,33 @@
   DCHECK(args.length() == 2);
   CONVERT_SMI_ARG_CHECKED(size, 0);
   CONVERT_SMI_ARG_CHECKED(flags, 1);
-  RUNTIME_ASSERT(IsAligned(size, kPointerSize));
-  RUNTIME_ASSERT(size > 0);
-  RUNTIME_ASSERT(size <= Page::kMaxRegularHeapObjectSize);
+  CHECK(IsAligned(size, kPointerSize));
+  CHECK(size > 0);
+  CHECK(size <= Page::kMaxRegularHeapObjectSize);
   bool double_align = AllocateDoubleAlignFlag::decode(flags);
   AllocationSpace space = AllocateTargetSpace::decode(flags);
   return *isolate->factory()->NewFillerObject(size, double_align, space);
 }
 
+RUNTIME_FUNCTION(Runtime_AllocateSeqOneByteString) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_SMI_ARG_CHECKED(length, 0);
+  Handle<SeqOneByteString> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result, isolate->factory()->NewRawOneByteString(length));
+  return *result;
+}
+
+RUNTIME_FUNCTION(Runtime_AllocateSeqTwoByteString) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_SMI_ARG_CHECKED(length, 0);
+  Handle<SeqTwoByteString> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result, isolate->factory()->NewRawTwoByteString(length));
+  return *result;
+}
 
 // Collect the raw data for a stack trace.  Returns an array of 4
 // element segments each containing a receiver, function, code and
@@ -369,23 +390,20 @@
   CONVERT_ARG_HANDLE_CHECKED(String, arg0, 1);
   CONVERT_ARG_HANDLE_CHECKED(String, arg1, 2);
   CONVERT_ARG_HANDLE_CHECKED(String, arg2, 3);
-  Handle<String> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      MessageTemplate::FormatMessage(template_index, arg0, arg1, arg2));
   isolate->native_context()->IncrementErrorsThrown();
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, MessageTemplate::FormatMessage(
+                                        template_index, arg0, arg1, arg2));
 }
 
-#define CALLSITE_GET(NAME, RETURN)                                  \
-  RUNTIME_FUNCTION(Runtime_CallSite##NAME##RT) {                    \
-    HandleScope scope(isolate);                                     \
-    DCHECK(args.length() == 1);                                     \
-    CONVERT_ARG_HANDLE_CHECKED(JSObject, call_site_obj, 0);         \
-    Handle<String> result;                                          \
-    CallSite call_site(isolate, call_site_obj);                     \
-    RUNTIME_ASSERT(call_site.IsJavaScript() || call_site.IsWasm()); \
-    return RETURN(call_site.NAME(), isolate);                       \
+#define CALLSITE_GET(NAME, RETURN)                          \
+  RUNTIME_FUNCTION(Runtime_CallSite##NAME##RT) {            \
+    HandleScope scope(isolate);                             \
+    DCHECK(args.length() == 1);                             \
+    CONVERT_ARG_HANDLE_CHECKED(JSObject, call_site_obj, 0); \
+    Handle<String> result;                                  \
+    CallSite call_site(isolate, call_site_obj);             \
+    CHECK(call_site.IsJavaScript() || call_site.IsWasm());  \
+    return RETURN(call_site.NAME(), isolate);               \
   }
 
 static inline Object* ReturnDereferencedHandle(Handle<Object> obj,
@@ -434,8 +452,8 @@
     JSFunction* fun = frame->function();
     Object* script = fun->shared()->script();
     if (script->IsScript() &&
-        !(Script::cast(script)->source()->IsUndefined())) {
-      Handle<Script> casted_script(Script::cast(script));
+        !(Script::cast(script)->source()->IsUndefined(isolate))) {
+      Handle<Script> casted_script(Script::cast(script), isolate);
       // Compute the location from the function and the relocation info of the
       // baseline code. For optimized code this will use the deoptimization
       // information to get canonical location information.
@@ -515,11 +533,8 @@
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
-  Handle<FixedArray> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      Object::CreateListFromArrayLike(isolate, object, ElementTypes::kAll));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, Object::CreateListFromArrayLike(
+                                        isolate, object, ElementTypes::kAll));
 }
 
 
@@ -597,10 +612,17 @@
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, callable, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Object::OrdinaryHasInstance(isolate, callable, object));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, Object::OrdinaryHasInstance(isolate, callable, object));
+}
+
+RUNTIME_FUNCTION(Runtime_IsWasmObject) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_CHECKED(Object, object, 0);
+  bool is_wasm_object =
+      object->IsJSObject() && wasm::IsWasmObject(JSObject::cast(object));
+  return *isolate->factory()->ToBoolean(is_wasm_object);
 }
 
 }  // namespace internal
diff --git a/src/runtime/runtime-json.cc b/src/runtime/runtime-json.cc
deleted file mode 100644
index 72fc758..0000000
--- a/src/runtime/runtime-json.cc
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
-#include "src/char-predicates-inl.h"
-#include "src/isolate-inl.h"
-#include "src/json-parser.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-RUNTIME_FUNCTION(Runtime_QuoteJSONString) {
-  HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
-  DCHECK(args.length() == 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Runtime::BasicJsonStringifyString(isolate, string));
-  return *result;
-}
-
-RUNTIME_FUNCTION(Runtime_BasicJSONStringify) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Runtime::BasicJsonStringify(isolate, object));
-  return *result;
-}
-
-RUNTIME_FUNCTION(Runtime_ParseJson) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(1, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
-  Handle<String> source;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, source,
-                                     Object::ToString(isolate, object));
-  source = String::Flatten(source);
-  // Optimized fast case where we only have Latin1 characters.
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     source->IsSeqOneByteString()
-                                         ? JsonParser<true>::Parse(source)
-                                         : JsonParser<false>::Parse(source));
-  return *result;
-}
-
-}  // namespace internal
-}  // namespace v8
diff --git a/src/runtime/runtime-literals.cc b/src/runtime/runtime-literals.cc
index 34feeba..9c43b40 100644
--- a/src/runtime/runtime-literals.cc
+++ b/src/runtime/runtime-literals.cc
@@ -85,7 +85,9 @@
     uint32_t element_index = 0;
     if (key->ToArrayIndex(&element_index)) {
       // Array index (uint32).
-      if (value->IsUninitialized()) value = handle(Smi::FromInt(0), isolate);
+      if (value->IsUninitialized(isolate)) {
+        value = handle(Smi::FromInt(0), isolate);
+      }
       maybe_result = JSObject::SetOwnElementIgnoreAttributes(
           boilerplate, element_index, value, NONE);
     } else {
@@ -209,7 +211,7 @@
 
   // Check if boilerplate exists. If not, create it first.
   Handle<Object> boilerplate(closure->literals()->literal(index), isolate);
-  if (boilerplate->IsUndefined()) {
+  if (boilerplate->IsUndefined(isolate)) {
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
         isolate, boilerplate, JSRegExp::New(pattern, JSRegExp::Flags(flags)));
     closure->literals()->set_literal(index, *boilerplate);
@@ -229,14 +231,14 @@
   bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
   bool enable_mementos = (flags & ObjectLiteral::kDisableMementos) == 0;
 
-  RUNTIME_ASSERT(literals_index >= 0 &&
-                 literals_index < literals->literals_count());
+  CHECK(literals_index >= 0);
+  CHECK(literals_index < literals->literals_count());
 
   // Check if boilerplate exists. If not, create it first.
   Handle<Object> literal_site(literals->literal(literals_index), isolate);
   Handle<AllocationSite> site;
   Handle<JSObject> boilerplate;
-  if (*literal_site == isolate->heap()->undefined_value()) {
+  if (literal_site->IsUndefined(isolate)) {
     Handle<Object> raw_boilerplate;
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
         isolate, raw_boilerplate,
@@ -263,9 +265,7 @@
   MaybeHandle<Object> maybe_copy =
       JSObject::DeepCopy(boilerplate, &usage_context);
   usage_context.ExitScope(site, boilerplate);
-  Handle<Object> copy;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, copy, maybe_copy);
-  return *copy;
+  RETURN_RESULT_OR_FAILURE(isolate, maybe_copy);
 }
 
 MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
@@ -274,7 +274,7 @@
   // Check if boilerplate exists. If not, create it first.
   Handle<Object> literal_site(literals->literal(literals_index), isolate);
   Handle<AllocationSite> site;
-  if (*literal_site == isolate->heap()->undefined_value()) {
+  if (literal_site->IsUndefined(isolate)) {
     DCHECK(*elements != isolate->heap()->empty_fixed_array());
     Handle<Object> boilerplate;
     ASSIGN_RETURN_ON_EXCEPTION(
@@ -302,9 +302,7 @@
 static MaybeHandle<JSObject> CreateArrayLiteralImpl(
     Isolate* isolate, Handle<LiteralsArray> literals, int literals_index,
     Handle<FixedArray> elements, int flags) {
-  RUNTIME_ASSERT_HANDLIFIED(
-      literals_index >= 0 && literals_index < literals->literals_count(),
-      JSObject);
+  CHECK(literals_index >= 0 && literals_index < literals->literals_count());
   Handle<AllocationSite> site;
   ASSIGN_RETURN_ON_EXCEPTION(
       isolate, site,
@@ -333,12 +331,10 @@
   CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
   CONVERT_SMI_ARG_CHECKED(flags, 3);
 
-  Handle<JSObject> result;
   Handle<LiteralsArray> literals(closure->literals(), isolate);
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, CreateArrayLiteralImpl(isolate, literals, literals_index,
-                                              elements, flags));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, CreateArrayLiteralImpl(isolate, literals, literals_index,
+                                      elements, flags));
 }
 
 
@@ -349,13 +345,11 @@
   CONVERT_SMI_ARG_CHECKED(literals_index, 1);
   CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
 
-  Handle<JSObject> result;
   Handle<LiteralsArray> literals(closure->literals(), isolate);
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
+  RETURN_RESULT_OR_FAILURE(
+      isolate,
       CreateArrayLiteralImpl(isolate, literals, literals_index, elements,
                              ArrayLiteral::kShallowElements));
-  return *result;
 }
 
 }  // namespace internal
diff --git a/src/runtime/runtime-liveedit.cc b/src/runtime/runtime-liveedit.cc
index da342de..72e8648 100644
--- a/src/runtime/runtime-liveedit.cc
+++ b/src/runtime/runtime-liveedit.cc
@@ -70,10 +70,8 @@
   RUNTIME_ASSERT(script->value()->IsScript());
   Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
 
-  Handle<JSArray> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, LiveEdit::GatherCompileInfo(script_handle, source));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           LiveEdit::GatherCompileInfo(script_handle, source));
 }
 
 
@@ -223,7 +221,7 @@
         isolate, new_element,
         JSReceiver::GetElement(isolate, new_shared_array, i));
     RUNTIME_ASSERT(
-        new_element->IsUndefined() ||
+        new_element->IsUndefined(isolate) ||
         (new_element->IsJSValue() &&
          Handle<JSValue>::cast(new_element)->value()->IsSharedFunctionInfo()));
   }
diff --git a/src/runtime/runtime-maths.cc b/src/runtime/runtime-maths.cc
index 91b6181..1a923bf 100644
--- a/src/runtime/runtime-maths.cc
+++ b/src/runtime/runtime-maths.cc
@@ -9,24 +9,10 @@
 #include "src/base/utils/random-number-generator.h"
 #include "src/bootstrapper.h"
 #include "src/codegen.h"
-#include "src/third_party/fdlibm/fdlibm.h"
 
 namespace v8 {
 namespace internal {
 
-#define RUNTIME_UNARY_MATH(Name, name)                         \
-  RUNTIME_FUNCTION(Runtime_Math##Name) {                       \
-    HandleScope scope(isolate);                                \
-    DCHECK(args.length() == 1);                                \
-    isolate->counters()->math_##name##_runtime()->Increment(); \
-    CONVERT_DOUBLE_ARG_CHECKED(x, 0);                          \
-    return *isolate->factory()->NewHeapNumber(std::name(x));   \
-  }
-
-RUNTIME_UNARY_MATH(LogRT, log)
-#undef RUNTIME_UNARY_MATH
-
-
 RUNTIME_FUNCTION(Runtime_DoubleHi) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
@@ -49,65 +35,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_ConstructDouble) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_NUMBER_CHECKED(uint32_t, hi, Uint32, args[0]);
-  CONVERT_NUMBER_CHECKED(uint32_t, lo, Uint32, args[1]);
-  uint64_t result = (static_cast<uint64_t>(hi) << 32) | lo;
-  return *isolate->factory()->NewNumber(uint64_to_double(result));
-}
-
-
-RUNTIME_FUNCTION(Runtime_RemPiO2) {
-  SealHandleScope shs(isolate);
-  DisallowHeapAllocation no_gc;
-  DCHECK(args.length() == 2);
-  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  CONVERT_ARG_CHECKED(JSTypedArray, result, 1);
-  RUNTIME_ASSERT(result->byte_length() == Smi::FromInt(2 * sizeof(double)));
-  FixedFloat64Array* array = FixedFloat64Array::cast(result->elements());
-  double* y = static_cast<double*>(array->DataPtr());
-  return Smi::FromInt(fdlibm::rempio2(x, y));
-}
-
-
-static const double kPiDividedBy4 = 0.78539816339744830962;
-
-
-RUNTIME_FUNCTION(Runtime_MathAtan2) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  isolate->counters()->math_atan2_runtime()->Increment();
-  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  CONVERT_DOUBLE_ARG_CHECKED(y, 1);
-  double result;
-  if (std::isinf(x) && std::isinf(y)) {
-    // Make sure that the result in case of two infinite arguments
-    // is a multiple of Pi / 4. The sign of the result is determined
-    // by the first argument (x) and the sign of the second argument
-    // determines the multiplier: one or three.
-    int multiplier = (x < 0) ? -1 : 1;
-    if (y < 0) multiplier *= 3;
-    result = multiplier * kPiDividedBy4;
-  } else {
-    result = std::atan2(x, y);
-  }
-  return *isolate->factory()->NewNumber(result);
-}
-
-
-RUNTIME_FUNCTION(Runtime_MathExpRT) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  isolate->counters()->math_exp_runtime()->Increment();
-
-  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  lazily_initialize_fast_exp(isolate);
-  return *isolate->factory()->NewNumber(fast_exp(x, isolate));
-}
-
-
 // Slow version of Math.pow.  We check for fast paths for special cases.
 // Used if VFP3 is not available.
 RUNTIME_FUNCTION(Runtime_MathPow) {
diff --git a/src/runtime/runtime-numbers.cc b/src/runtime/runtime-numbers.cc
index efbdeb2..edd83bc 100644
--- a/src/runtime/runtime-numbers.cc
+++ b/src/runtime/runtime-numbers.cc
@@ -17,7 +17,7 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 2);
   CONVERT_SMI_ARG_CHECKED(radix, 1);
-  RUNTIME_ASSERT(2 <= radix && radix <= 36);
+  CHECK(2 <= radix && radix <= 36);
 
   // Fast case where the result is a one character string.
   if (args[0]->IsSmi()) {
@@ -56,8 +56,8 @@
   CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
   int f = FastD2IChecked(f_number);
   // See DoubleToFixedCString for these constants:
-  RUNTIME_ASSERT(f >= 0 && f <= 20);
-  RUNTIME_ASSERT(!Double(value).IsSpecial());
+  CHECK(f >= 0 && f <= 20);
+  CHECK(!Double(value).IsSpecial());
   char* str = DoubleToFixedCString(value, f);
   Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
   DeleteArray(str);
@@ -72,8 +72,8 @@
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
   int f = FastD2IChecked(f_number);
-  RUNTIME_ASSERT(f >= -1 && f <= 20);
-  RUNTIME_ASSERT(!Double(value).IsSpecial());
+  CHECK(f >= -1 && f <= 20);
+  CHECK(!Double(value).IsSpecial());
   char* str = DoubleToExponentialCString(value, f);
   Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
   DeleteArray(str);
@@ -88,8 +88,8 @@
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
   int f = FastD2IChecked(f_number);
-  RUNTIME_ASSERT(f >= 1 && f <= 21);
-  RUNTIME_ASSERT(!Double(value).IsSpecial());
+  CHECK(f >= 1 && f <= 21);
+  CHECK(!Double(value).IsSpecial());
   char* str = DoubleToPrecisionCString(value, f);
   Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
   DeleteArray(str);
@@ -121,7 +121,7 @@
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
   CONVERT_NUMBER_CHECKED(int, radix, Int32, args[1]);
   // Step 8.a. is already handled in the JS function.
-  RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36));
+  CHECK(radix == 0 || (2 <= radix && radix <= 36));
 
   subject = String::Flatten(subject);
   double value;
@@ -174,20 +174,6 @@
 }
 
 
-// TODO(bmeurer): Kill this runtime entry. Uses in date.js are wrong anyway.
-RUNTIME_FUNCTION(Runtime_NumberToIntegerMapMinusZero) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, input, Object::ToNumber(input));
-  double double_value = DoubleToInteger(input->Number());
-  // Map both -0 and +0 to +0.
-  if (double_value == 0) double_value = 0;
-
-  return *isolate->factory()->NewNumber(double_value);
-}
-
-
 // Converts a Number to a Smi, if possible. Returns NaN if the number is not
 // a small integer.
 RUNTIME_FUNCTION(Runtime_NumberToSmi) {
diff --git a/src/runtime/runtime-object.cc b/src/runtime/runtime-object.cc
index 8c9c230..c7f2398 100644
--- a/src/runtime/runtime-object.cc
+++ b/src/runtime/runtime-object.cc
@@ -8,7 +8,6 @@
 #include "src/bootstrapper.h"
 #include "src/debug/debug.h"
 #include "src/isolate-inl.h"
-#include "src/json-stringifier.h"
 #include "src/messages.h"
 #include "src/property-descriptor.h"
 #include "src/runtime/runtime.h"
@@ -18,8 +17,9 @@
 
 MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
                                                Handle<Object> object,
-                                               Handle<Object> key) {
-  if (object->IsUndefined() || object->IsNull()) {
+                                               Handle<Object> key,
+                                               bool* is_found_out) {
+  if (object->IsUndefined(isolate) || object->IsNull(isolate)) {
     THROW_NEW_ERROR(
         isolate,
         NewTypeError(MessageTemplate::kNonObjectPropertyLoad, key, object),
@@ -31,7 +31,9 @@
       LookupIterator::PropertyOrElement(isolate, object, key, &success);
   if (!success) return MaybeHandle<Object>();
 
-  return Object::GetProperty(&it);
+  MaybeHandle<Object> result = Object::GetProperty(&it);
+  if (is_found_out) *is_found_out = it.IsFound();
+  return result;
 }
 
 static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
@@ -63,7 +65,9 @@
           PropertyCell* cell = PropertyCell::cast(dictionary->ValueAt(entry));
           if (cell->property_details().type() == DATA) {
             Object* value = cell->value();
-            if (!value->IsTheHole()) return Handle<Object>(value, isolate);
+            if (!value->IsTheHole(isolate)) {
+              return Handle<Object>(value, isolate);
+            }
             // If value is the hole (meaning, absent) do the general lookup.
           }
         }
@@ -195,7 +199,7 @@
         key_is_array_index
             ? index < static_cast<uint32_t>(String::cast(*object)->length())
             : key->Equals(isolate->heap()->length_string()));
-  } else if (object->IsNull() || object->IsUndefined()) {
+  } else if (object->IsNull(isolate) || object->IsUndefined(isolate)) {
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject));
   }
@@ -208,7 +212,7 @@
                                                Handle<Object> key,
                                                Handle<Object> value,
                                                LanguageMode language_mode) {
-  if (object->IsUndefined() || object->IsNull()) {
+  if (object->IsUndefined(isolate) || object->IsNull(isolate)) {
     THROW_NEW_ERROR(
         isolate,
         NewTypeError(MessageTemplate::kNonObjectPropertyStore, key, object),
@@ -226,24 +230,12 @@
   return value;
 }
 
-MaybeHandle<Object> Runtime::BasicJsonStringify(Isolate* isolate,
-                                                Handle<Object> object) {
-  return BasicJsonStringifier(isolate).Stringify(object);
-}
-
-MaybeHandle<Object> Runtime::BasicJsonStringifyString(Isolate* isolate,
-                                                      Handle<String> string) {
-  return BasicJsonStringifier::StringifyString(isolate, string);
-}
 
 RUNTIME_FUNCTION(Runtime_GetPrototype) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
-  Handle<Object> prototype;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, prototype,
-                                     JSReceiver::GetPrototype(isolate, obj));
-  return *prototype;
+  RETURN_RESULT_OR_FAILURE(isolate, JSReceiver::GetPrototype(isolate, obj));
 }
 
 
@@ -285,42 +277,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_LoadGlobalViaContext) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(1, args.length());
-  CONVERT_SMI_ARG_CHECKED(slot, 0);
-
-  // Go up context chain to the script context.
-  Handle<Context> script_context(isolate->context()->script_context(), isolate);
-  DCHECK(script_context->IsScriptContext());
-  DCHECK(script_context->get(slot)->IsPropertyCell());
-
-  // Lookup the named property on the global object.
-  Handle<ScopeInfo> scope_info(script_context->scope_info(), isolate);
-  Handle<Name> name(scope_info->ContextSlotName(slot), isolate);
-  Handle<JSGlobalObject> global_object(script_context->global_object(),
-                                       isolate);
-  LookupIterator it(global_object, name, global_object, LookupIterator::OWN);
-
-  // Switch to fast mode only if there is a data property and it's not on
-  // a hidden prototype.
-  if (it.state() == LookupIterator::DATA &&
-      it.GetHolder<Object>().is_identical_to(global_object)) {
-    // Now update the cell in the script context.
-    Handle<PropertyCell> cell = it.GetPropertyCell();
-    script_context->set(slot, *cell);
-  } else {
-    // This is not a fast case, so keep this access in a slow mode.
-    // Store empty_property_cell here to release the outdated property cell.
-    script_context->set(slot, isolate->heap()->empty_property_cell());
-  }
-
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Object::GetProperty(&it));
-  return *result;
-}
-
-
 namespace {
 
 Object* StoreGlobalViaContext(Isolate* isolate, int slot, Handle<Object> value,
@@ -386,13 +342,10 @@
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
 
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Runtime::GetObjectProperty(isolate, object, key));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           Runtime::GetObjectProperty(isolate, object, key));
 }
 
-
 // KeyedGetProperty is called from KeyedLoadIC::GenerateGeneric.
 RUNTIME_FUNCTION(Runtime_KeyedGetProperty) {
   HandleScope scope(isolate);
@@ -401,16 +354,14 @@
   CONVERT_ARG_HANDLE_CHECKED(Object, receiver_obj, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key_obj, 1);
 
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, KeyedGetObjectProperty(isolate, receiver_obj, key_obj));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, KeyedGetObjectProperty(isolate, receiver_obj, key_obj));
 }
 
 
 RUNTIME_FUNCTION(Runtime_AddNamedProperty) {
   HandleScope scope(isolate);
-  RUNTIME_ASSERT(args.length() == 4);
+  DCHECK_EQ(4, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
@@ -423,14 +374,11 @@
   LookupIterator it(object, name, object, LookupIterator::OWN_SKIP_INTERCEPTOR);
   Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
   if (!maybe.IsJust()) return isolate->heap()->exception();
-  RUNTIME_ASSERT(!it.IsFound());
+  CHECK(!it.IsFound());
 #endif
 
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      JSObject::SetOwnPropertyIgnoreAttributes(object, name, value, attrs));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
+                                        object, name, value, attrs));
 }
 
 
@@ -438,7 +386,7 @@
 // This is used to create an indexed data property into an array.
 RUNTIME_FUNCTION(Runtime_AddElement) {
   HandleScope scope(isolate);
-  RUNTIME_ASSERT(args.length() == 3);
+  DCHECK_EQ(3, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
@@ -452,25 +400,22 @@
                     LookupIterator::OWN_SKIP_INTERCEPTOR);
   Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
   if (!maybe.IsJust()) return isolate->heap()->exception();
-  RUNTIME_ASSERT(!it.IsFound());
+  CHECK(!it.IsFound());
 
   if (object->IsJSArray()) {
     Handle<JSArray> array = Handle<JSArray>::cast(object);
-    RUNTIME_ASSERT(!JSArray::WouldChangeReadOnlyLength(array, index));
+    CHECK(!JSArray::WouldChangeReadOnlyLength(array, index));
   }
 #endif
 
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      JSObject::SetOwnElementIgnoreAttributes(object, index, value, NONE));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, JSObject::SetOwnElementIgnoreAttributes(
+                                        object, index, value, NONE));
 }
 
 
 RUNTIME_FUNCTION(Runtime_AppendElement) {
   HandleScope scope(isolate);
-  RUNTIME_ASSERT(args.length() == 2);
+  DCHECK_EQ(2, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
@@ -478,9 +423,8 @@
   uint32_t index;
   CHECK(array->length()->ToArrayIndex(&index));
 
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, JSObject::AddDataElement(array, index, value, NONE));
+  RETURN_FAILURE_ON_EXCEPTION(
+      isolate, JSObject::AddDataElement(array, index, value, NONE));
   JSObject::ValidateElements(array);
   return *array;
 }
@@ -488,7 +432,7 @@
 
 RUNTIME_FUNCTION(Runtime_SetProperty) {
   HandleScope scope(isolate);
-  RUNTIME_ASSERT(args.length() == 4);
+  DCHECK_EQ(4, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
@@ -496,11 +440,9 @@
   CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode_arg, 3);
   LanguageMode language_mode = language_mode_arg;
 
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
+  RETURN_RESULT_OR_FAILURE(
+      isolate,
       Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
-  return *result;
 }
 
 
@@ -591,7 +533,8 @@
   Handle<FixedArray> keys;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, keys,
-      JSReceiver::GetKeys(object, OWN_ONLY, filter, CONVERT_TO_STRING));
+      KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly, filter,
+                              GetKeysConversion::kConvertToString));
 
   return *isolate->factory()->NewJSArrayWithElements(keys);
 }
@@ -639,10 +582,7 @@
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, new_target, 1);
-  Handle<JSObject> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     JSObject::New(target, new_target));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, JSObject::New(target, new_target));
 }
 
 
@@ -662,15 +602,14 @@
   DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Smi, index, 1);
-  RUNTIME_ASSERT((index->value() & 1) == 1);
+  CHECK((index->value() & 1) == 1);
   FieldIndex field_index =
       FieldIndex::ForLoadByFieldIndex(object->map(), index->value());
   if (field_index.is_inobject()) {
-    RUNTIME_ASSERT(field_index.property_index() <
-                   object->map()->GetInObjectProperties());
+    CHECK(field_index.property_index() <
+          object->map()->GetInObjectProperties());
   } else {
-    RUNTIME_ASSERT(field_index.outobject_array_index() <
-                   object->properties()->length());
+    CHECK(field_index.outobject_array_index() < object->properties()->length());
   }
   return *JSObject::FastPropertyAt(object, Representation::Double(),
                                    field_index);
@@ -700,9 +639,8 @@
   return isolate->heap()->ToBoolean(obj->IsJSGlobalProxy());
 }
 
-
-static bool IsValidAccessor(Handle<Object> obj) {
-  return obj->IsUndefined() || obj->IsCallable() || obj->IsNull();
+static bool IsValidAccessor(Isolate* isolate, Handle<Object> obj) {
+  return obj->IsUndefined(isolate) || obj->IsCallable() || obj->IsNull(isolate);
 }
 
 
@@ -716,12 +654,12 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 5);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
-  RUNTIME_ASSERT(!obj->IsNull());
+  CHECK(!obj->IsNull(isolate));
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, getter, 2);
-  RUNTIME_ASSERT(IsValidAccessor(getter));
+  CHECK(IsValidAccessor(isolate, getter));
   CONVERT_ARG_HANDLE_CHECKED(Object, setter, 3);
-  RUNTIME_ASSERT(IsValidAccessor(setter));
+  CHECK(IsValidAccessor(isolate, setter));
   CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 4);
 
   RETURN_FAILURE_ON_EXCEPTION(
@@ -739,7 +677,7 @@
   CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
   CONVERT_SMI_ARG_CHECKED(set_function_name, 4);
 
-  if (FLAG_harmony_function_name && set_function_name) {
+  if (set_function_name) {
     DCHECK(value->IsJSFunction());
     JSFunction::SetName(Handle<JSFunction>::cast(value), name,
                         isolate->factory()->empty_string());
@@ -808,8 +746,7 @@
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, getter, 2);
   CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
 
-  if (FLAG_harmony_function_name &&
-      String::cast(getter->shared()->name())->length() == 0) {
+  if (String::cast(getter->shared()->name())->length() == 0) {
     JSFunction::SetName(getter, name, isolate->factory()->get_string());
   }
 
@@ -829,8 +766,7 @@
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, setter, 2);
   CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
 
-  if (FLAG_harmony_function_name &&
-      String::cast(setter->shared()->name())->length() == 0) {
+  if (String::cast(setter->shared()->name())->length() == 0) {
     JSFunction::SetName(setter, name, isolate->factory()->set_string());
   }
 
@@ -846,10 +782,7 @@
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
-  Handle<JSReceiver> receiver;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
-                                     Object::ToObject(isolate, object));
-  return *receiver;
+  RETURN_RESULT_OR_FAILURE(isolate, Object::ToObject(isolate, object));
 }
 
 
@@ -857,10 +790,7 @@
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     Object::ToPrimitive(input));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, Object::ToPrimitive(input));
 }
 
 
@@ -868,10 +798,8 @@
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Object::ToPrimitive(input, ToPrimitiveHint::kNumber));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, Object::ToPrimitive(input, ToPrimitiveHint::kNumber));
 }
 
 
@@ -879,10 +807,8 @@
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Object::ToPrimitive(input, ToPrimitiveHint::kString));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, Object::ToPrimitive(input, ToPrimitiveHint::kString));
 }
 
 
@@ -890,9 +816,7 @@
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Object::ToNumber(input));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, Object::ToNumber(input));
 }
 
 
@@ -900,10 +824,7 @@
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     Object::ToInteger(isolate, input));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, Object::ToInteger(isolate, input));
 }
 
 
@@ -911,10 +832,7 @@
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     Object::ToLength(isolate, input));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, Object::ToLength(isolate, input));
 }
 
 
@@ -922,10 +840,7 @@
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     Object::ToString(isolate, input));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, Object::ToString(isolate, input));
 }
 
 
@@ -933,10 +848,7 @@
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     Object::ToName(isolate, input));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, Object::ToName(isolate, input));
 }
 
 
diff --git a/src/runtime/runtime-operators.cc b/src/runtime/runtime-operators.cc
index 78dd16f..2a9255b 100644
--- a/src/runtime/runtime-operators.cc
+++ b/src/runtime/runtime-operators.cc
@@ -14,10 +14,7 @@
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     Object::Multiply(isolate, lhs, rhs));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, Object::Multiply(isolate, lhs, rhs));
 }
 
 
@@ -26,10 +23,7 @@
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     Object::Divide(isolate, lhs, rhs));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, Object::Divide(isolate, lhs, rhs));
 }
 
 
@@ -38,10 +32,7 @@
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     Object::Modulus(isolate, lhs, rhs));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, Object::Modulus(isolate, lhs, rhs));
 }
 
 
@@ -50,10 +41,7 @@
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     Object::Add(isolate, lhs, rhs));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, Object::Add(isolate, lhs, rhs));
 }
 
 
@@ -62,10 +50,7 @@
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     Object::Subtract(isolate, lhs, rhs));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, Object::Subtract(isolate, lhs, rhs));
 }
 
 
@@ -74,10 +59,7 @@
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     Object::ShiftLeft(isolate, lhs, rhs));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, Object::ShiftLeft(isolate, lhs, rhs));
 }
 
 
@@ -86,10 +68,7 @@
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     Object::ShiftRight(isolate, lhs, rhs));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, Object::ShiftRight(isolate, lhs, rhs));
 }
 
 
@@ -98,10 +77,8 @@
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Object::ShiftRightLogical(isolate, lhs, rhs));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           Object::ShiftRightLogical(isolate, lhs, rhs));
 }
 
 
@@ -110,10 +87,7 @@
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     Object::BitwiseAnd(isolate, lhs, rhs));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, Object::BitwiseAnd(isolate, lhs, rhs));
 }
 
 
@@ -122,10 +96,7 @@
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     Object::BitwiseOr(isolate, lhs, rhs));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, Object::BitwiseOr(isolate, lhs, rhs));
 }
 
 
@@ -134,10 +105,7 @@
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     Object::BitwiseXor(isolate, lhs, rhs));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, Object::BitwiseXor(isolate, lhs, rhs));
 }
 
 RUNTIME_FUNCTION(Runtime_Equal) {
@@ -221,10 +189,8 @@
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, callable, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Object::InstanceOf(isolate, object, callable));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           Object::InstanceOf(isolate, object, callable));
 }
 
 }  // namespace internal
diff --git a/src/runtime/runtime-proxy.cc b/src/runtime/runtime-proxy.cc
index 7764d25..87c7c91 100644
--- a/src/runtime/runtime-proxy.cc
+++ b/src/runtime/runtime-proxy.cc
@@ -40,17 +40,15 @@
       Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name));
   // 6. If trap is undefined, then
   int const arguments_length = args.length() - 2;
-  if (trap->IsUndefined()) {
+  if (trap->IsUndefined(isolate)) {
     // 6.a. Return Call(target, thisArgument, argumentsList).
     ScopedVector<Handle<Object>> argv(arguments_length);
     for (int i = 0; i < arguments_length; ++i) {
       argv[i] = args.at<Object>(i + 1);
     }
-    Handle<Object> result;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result, Execution::Call(isolate, target, receiver,
-                                         arguments_length, argv.start()));
-    return *result;
+    RETURN_RESULT_OR_FAILURE(
+        isolate, Execution::Call(isolate, target, receiver, arguments_length,
+                                 argv.start()));
   }
   // 7. Let argArray be CreateArrayFromList(argumentsList).
   Handle<JSArray> arg_array = isolate->factory()->NewJSArray(
@@ -63,12 +61,10 @@
     }
   }
   // 8. Return Call(trap, handler, «target, thisArgument, argArray»).
-  Handle<Object> trap_result;
   Handle<Object> trap_args[] = {target, receiver, arg_array};
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, trap_result,
+  RETURN_RESULT_OR_FAILURE(
+      isolate,
       Execution::Call(isolate, trap, handler, arraysize(trap_args), trap_args));
-  return *trap_result;
 }
 
 
@@ -98,7 +94,7 @@
       Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name));
   // 6. If trap is undefined, then
   int const arguments_length = args.length() - 3;
-  if (trap->IsUndefined()) {
+  if (trap->IsUndefined(isolate)) {
     // 6.a. Assert: target has a [[Construct]] internal method.
     DCHECK(target->IsConstructor());
     // 6.b. Return Construct(target, argumentsList, newTarget).
@@ -106,11 +102,9 @@
     for (int i = 0; i < arguments_length; ++i) {
       argv[i] = args.at<Object>(i + 1);
     }
-    Handle<Object> result;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result, Execution::New(isolate, target, new_target,
-                                        arguments_length, argv.start()));
-    return *result;
+    RETURN_RESULT_OR_FAILURE(
+        isolate, Execution::New(isolate, target, new_target, arguments_length,
+                                argv.start()));
   }
   // 7. Let argArray be CreateArrayFromList(argumentsList).
   Handle<JSArray> arg_array = isolate->factory()->NewJSArray(
diff --git a/src/runtime/runtime-regexp.cc b/src/runtime/runtime-regexp.cc
index aead017..a8133d3 100644
--- a/src/runtime/runtime-regexp.cc
+++ b/src/runtime/runtime-regexp.cc
@@ -544,9 +544,7 @@
   RegExpImpl::SetLastMatchInfo(last_match_info, subject, capture_count,
                                global_cache.LastSuccessfulMatch());
 
-  Handle<String> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, builder.ToString());
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, builder.ToString());
 }
 
 
@@ -658,8 +656,8 @@
   CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
 
-  RUNTIME_ASSERT(regexp->GetFlags() & JSRegExp::kGlobal);
-  RUNTIME_ASSERT(last_match_info->HasFastObjectElements());
+  CHECK(regexp->GetFlags() & JSRegExp::kGlobal);
+  CHECK(last_match_info->HasFastObjectElements());
 
   subject = String::Flatten(subject);
 
@@ -686,11 +684,11 @@
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]);
-  RUNTIME_ASSERT(limit > 0);
+  CHECK(limit > 0);
 
   int subject_length = subject->length();
   int pattern_length = pattern->length();
-  RUNTIME_ASSERT(pattern_length > 0);
+  CHECK(pattern_length > 0);
 
   if (limit == 0xffffffffu) {
     FixedArray* last_match_cache_unused;
@@ -776,14 +774,11 @@
   CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
   // Due to the way the JS calls are constructed this must be less than the
   // length of a string, i.e. it is always a Smi.  We check anyway for security.
-  RUNTIME_ASSERT(index >= 0);
-  RUNTIME_ASSERT(index <= subject->length());
+  CHECK(index >= 0);
+  CHECK(index <= subject->length());
   isolate->counters()->regexp_entry_runtime()->Increment();
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      RegExpImpl::Exec(regexp, subject, index, last_match_info));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, RegExpImpl::Exec(regexp, subject, index, last_match_info));
 }
 
 
@@ -807,7 +802,7 @@
   HandleScope handle_scope(isolate);
   DCHECK(args.length() == 3);
   CONVERT_SMI_ARG_CHECKED(size, 0);
-  RUNTIME_ASSERT(size >= 0 && size <= FixedArray::kMaxLength);
+  CHECK(size >= 0 && size <= FixedArray::kMaxLength);
   CONVERT_ARG_HANDLE_CHECKED(Object, index, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, input, 2);
   Handle<FixedArray> elements = isolate->factory()->NewFixedArray(size);
@@ -995,11 +990,11 @@
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 2);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3);
-  RUNTIME_ASSERT(last_match_info->HasFastObjectElements());
-  RUNTIME_ASSERT(result_array->HasFastObjectElements());
+  CHECK(last_match_info->HasFastObjectElements());
+  CHECK(result_array->HasFastObjectElements());
 
   subject = String::Flatten(subject);
-  RUNTIME_ASSERT(regexp->GetFlags() & JSRegExp::kGlobal);
+  CHECK(regexp->GetFlags() & JSRegExp::kGlobal);
 
   if (regexp->CaptureCount() == 0) {
     return SearchRegExpMultiple<false>(isolate, subject, regexp,
diff --git a/src/runtime/runtime-scopes.cc b/src/runtime/runtime-scopes.cc
index 68df582..0bdbe2e 100644
--- a/src/runtime/runtime-scopes.cc
+++ b/src/runtime/runtime-scopes.cc
@@ -16,10 +16,18 @@
 namespace v8 {
 namespace internal {
 
-static Object* ThrowRedeclarationError(Isolate* isolate, Handle<String> name) {
+enum class RedeclarationType { kSyntaxError = 0, kTypeError = 1 };
+
+static Object* ThrowRedeclarationError(Isolate* isolate, Handle<String> name,
+                                       RedeclarationType redeclaration_type) {
   HandleScope scope(isolate);
-  THROW_NEW_ERROR_RETURN_FAILURE(
-      isolate, NewTypeError(MessageTemplate::kVarRedeclaration, name));
+  if (redeclaration_type == RedeclarationType::kSyntaxError) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewSyntaxError(MessageTemplate::kVarRedeclaration, name));
+  } else {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kVarRedeclaration, name));
+  }
 }
 
 
@@ -34,13 +42,18 @@
 static Object* DeclareGlobals(Isolate* isolate, Handle<JSGlobalObject> global,
                               Handle<String> name, Handle<Object> value,
                               PropertyAttributes attr, bool is_var,
-                              bool is_const, bool is_function) {
+                              bool is_function,
+                              RedeclarationType redeclaration_type) {
   Handle<ScriptContextTable> script_contexts(
       global->native_context()->script_context_table());
   ScriptContextTable::LookupResult lookup;
   if (ScriptContextTable::Lookup(script_contexts, name, &lookup) &&
       IsLexicalVariableMode(lookup.mode)) {
-    return ThrowRedeclarationError(isolate, name);
+    // ES#sec-globaldeclarationinstantiation 6.a:
+    // If envRec.HasLexicalDeclaration(name) is true, throw a SyntaxError
+    // exception.
+    return ThrowRedeclarationError(isolate, name,
+                                   RedeclarationType::kSyntaxError);
   }
 
   // Do the lookup own properties only, see ES5 erratum.
@@ -51,7 +64,6 @@
   if (it.IsFound()) {
     PropertyAttributes old_attributes = maybe.FromJust();
     // The name was declared before; check for conflicting re-declarations.
-    if (is_const) return ThrowRedeclarationError(isolate, name);
 
     // Skip var re-declarations.
     if (is_var) return isolate->heap()->undefined_value();
@@ -68,7 +80,11 @@
       if (old_details.IsReadOnly() || old_details.IsDontEnum() ||
           (it.state() == LookupIterator::ACCESSOR &&
            it.GetAccessors()->IsAccessorPair())) {
-        return ThrowRedeclarationError(isolate, name);
+        // ES#sec-globaldeclarationinstantiation 5.d:
+        // If hasRestrictedGlobal is true, throw a SyntaxError exception.
+        // ES#sec-evaldeclarationinstantiation 8.a.iv.1.b:
+        // If fnDefinable is false, throw a TypeError exception.
+        return ThrowRedeclarationError(isolate, name, redeclaration_type);
       }
       // If the existing property is not configurable, keep its attributes. Do
       attr = old_attributes;
@@ -106,14 +122,9 @@
     Handle<String> name(String::cast(pairs->get(i)));
     Handle<Object> initial_value(pairs->get(i + 1), isolate);
 
-    // We have to declare a global const property. To capture we only
-    // assign to it when evaluating the assignment for "const x =
-    // <expr>" the initial value is the hole.
-    bool is_var = initial_value->IsUndefined();
-    bool is_const = initial_value->IsTheHole();
+    bool is_var = initial_value->IsUndefined(isolate);
     bool is_function = initial_value->IsSharedFunctionInfo();
-    DCHECK_EQ(1,
-              BoolToInt(is_var) + BoolToInt(is_const) + BoolToInt(is_function));
+    DCHECK_EQ(1, BoolToInt(is_var) + BoolToInt(is_function));
 
     Handle<Object> value;
     if (is_function) {
@@ -133,13 +144,14 @@
     bool is_native = DeclareGlobalsNativeFlag::decode(flags);
     bool is_eval = DeclareGlobalsEvalFlag::decode(flags);
     int attr = NONE;
-    if (is_const) attr |= READ_ONLY;
     if (is_function && is_native) attr |= READ_ONLY;
-    if (!is_const && !is_eval) attr |= DONT_DELETE;
+    if (!is_eval) attr |= DONT_DELETE;
 
-    Object* result = DeclareGlobals(isolate, global, name, value,
-                                    static_cast<PropertyAttributes>(attr),
-                                    is_var, is_const, is_function);
+    // ES#sec-globaldeclarationinstantiation 5.d:
+    // If hasRestrictedGlobal is true, throw a SyntaxError exception.
+    Object* result = DeclareGlobals(
+        isolate, global, name, value, static_cast<PropertyAttributes>(attr),
+        is_var, is_function, RedeclarationType::kSyntaxError);
     if (isolate->has_pending_exception()) return result;
   });
 
@@ -149,32 +161,20 @@
 
 RUNTIME_FUNCTION(Runtime_InitializeVarGlobal) {
   HandleScope scope(isolate);
-  // args[0] == name
-  // args[1] == language_mode
-  // args[2] == value (optional)
-
-  // Determine if we need to assign to the variable if it already
-  // exists (based on the number of arguments).
-  RUNTIME_ASSERT(args.length() == 3);
-
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
   CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
 
   Handle<JSGlobalObject> global(isolate->context()->global_object());
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Object::SetProperty(global, name, value, language_mode));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(
+      isolate, Object::SetProperty(global, name, value, language_mode));
 }
 
 
 RUNTIME_FUNCTION(Runtime_InitializeConstGlobal) {
   HandleScope handle_scope(isolate);
-  // All constants are declared with an initial value. The name
-  // of the constant is the first argument and the initial value
-  // is the second.
-  RUNTIME_ASSERT(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
 
@@ -207,76 +207,69 @@
   return *value;
 }
 
-
 namespace {
 
-Object* DeclareLookupSlot(Isolate* isolate, Handle<String> name,
-                          Handle<Object> initial_value,
-                          PropertyAttributes attr) {
-  // Declarations are always made in a function, eval or script context, or
-  // a declaration block scope.
-  // In the case of eval code, the context passed is the context of the caller,
-  // which may be some nested context and not the declaration context.
+Object* DeclareEvalHelper(Isolate* isolate, Handle<String> name,
+                          Handle<Object> value) {
+  // Declarations are always made in a function, native, or script context, or
+  // a declaration block scope. Since this is called from eval, the context
+  // passed is the context of the caller, which may be some nested context and
+  // not the declaration context.
   Handle<Context> context_arg(isolate->context(), isolate);
   Handle<Context> context(context_arg->declaration_context(), isolate);
 
-  // TODO(verwaest): Unify the encoding indicating "var" with DeclareGlobals.
-  bool is_var = *initial_value == NULL;
-  bool is_const = initial_value->IsTheHole();
-  bool is_function = initial_value->IsJSFunction();
-  DCHECK_EQ(1,
-            BoolToInt(is_var) + BoolToInt(is_const) + BoolToInt(is_function));
+  DCHECK(context->IsFunctionContext() || context->IsNativeContext() ||
+         context->IsScriptContext() ||
+         (context->IsBlockContext() && context->has_extension()));
+
+  bool is_function = value->IsJSFunction();
+  bool is_var = !is_function;
+  DCHECK(!is_var || value->IsUndefined(isolate));
 
   int index;
   PropertyAttributes attributes;
   BindingFlags binding_flags;
 
-  if ((attr & EVAL_DECLARED) != 0) {
-    // Check for a conflict with a lexically scoped variable
-    context_arg->Lookup(name, LEXICAL_TEST, &index, &attributes,
-                        &binding_flags);
-    if (attributes != ABSENT && binding_flags == BINDING_CHECK_INITIALIZED) {
-      return ThrowRedeclarationError(isolate, name);
-    }
-    attr = static_cast<PropertyAttributes>(attr & ~EVAL_DECLARED);
+  // Check for a conflict with a lexically scoped variable
+  context_arg->Lookup(name, LEXICAL_TEST, &index, &attributes, &binding_flags);
+  if (attributes != ABSENT && binding_flags == BINDING_CHECK_INITIALIZED) {
+    // ES#sec-evaldeclarationinstantiation 5.a.i.1:
+    // If varEnvRec.HasLexicalDeclaration(name) is true, throw a SyntaxError
+    // exception.
+    // ES#sec-evaldeclarationinstantiation 5.d.ii.2.a.i:
+    // Throw a SyntaxError exception.
+    return ThrowRedeclarationError(isolate, name,
+                                   RedeclarationType::kSyntaxError);
   }
 
   Handle<Object> holder = context->Lookup(name, DONT_FOLLOW_CHAINS, &index,
                                           &attributes, &binding_flags);
-  if (holder.is_null()) {
-    // In case of JSProxy, an exception might have been thrown.
-    if (isolate->has_pending_exception()) return isolate->heap()->exception();
-  }
+  DCHECK(!isolate->has_pending_exception());
 
   Handle<JSObject> object;
-  Handle<Object> value =
-      is_function ? initial_value
-                  : Handle<Object>::cast(isolate->factory()->undefined_value());
 
-  // TODO(verwaest): This case should probably not be covered by this function,
-  // but by DeclareGlobals instead.
   if (attributes != ABSENT && holder->IsJSGlobalObject()) {
+    // ES#sec-evaldeclarationinstantiation 8.a.iv.1.b:
+    // If fnDefinable is false, throw a TypeError exception.
     return DeclareGlobals(isolate, Handle<JSGlobalObject>::cast(holder), name,
-                          value, attr, is_var, is_const, is_function);
+                          value, NONE, is_var, is_function,
+                          RedeclarationType::kTypeError);
   }
   if (context_arg->extension()->IsJSGlobalObject()) {
     Handle<JSGlobalObject> global(
         JSGlobalObject::cast(context_arg->extension()), isolate);
-    return DeclareGlobals(isolate, global, name, value, attr, is_var, is_const,
-                          is_function);
+    return DeclareGlobals(isolate, global, name, value, NONE, is_var,
+                          is_function, RedeclarationType::kTypeError);
   } else if (context->IsScriptContext()) {
     DCHECK(context->global_object()->IsJSGlobalObject());
     Handle<JSGlobalObject> global(
         JSGlobalObject::cast(context->global_object()), isolate);
-    return DeclareGlobals(isolate, global, name, value, attr, is_var, is_const,
-                          is_function);
+    return DeclareGlobals(isolate, global, name, value, NONE, is_var,
+                          is_function, RedeclarationType::kTypeError);
   }
 
   if (attributes != ABSENT) {
-    // The name was declared before; check for conflicting re-declarations.
-    if (is_const || (attributes & READ_ONLY) != 0) {
-      return ThrowRedeclarationError(isolate, name);
-    }
+    DCHECK_EQ(NONE, attributes);
 
     // Skip var re-declarations.
     if (is_var) return isolate->heap()->undefined_value();
@@ -284,7 +277,7 @@
     DCHECK(is_function);
     if (index != Context::kNotFound) {
       DCHECK(holder.is_identical_to(context));
-      context->set(index, *initial_value);
+      context->set(index, *value);
       return isolate->heap()->undefined_value();
     }
 
@@ -313,26 +306,28 @@
   }
 
   RETURN_FAILURE_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
-                                           object, name, value, attr));
+                                           object, name, value, NONE));
 
   return isolate->heap()->undefined_value();
 }
 
 }  // namespace
 
-
-RUNTIME_FUNCTION(Runtime_DeclareLookupSlot) {
+RUNTIME_FUNCTION(Runtime_DeclareEvalFunction) {
   HandleScope scope(isolate);
-  DCHECK_EQ(3, args.length());
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, initial_value, 1);
-  CONVERT_ARG_HANDLE_CHECKED(Smi, property_attributes, 2);
-
-  PropertyAttributes attributes =
-      static_cast<PropertyAttributes>(property_attributes->value());
-  return DeclareLookupSlot(isolate, name, initial_value, attributes);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+  return DeclareEvalHelper(isolate, name, value);
 }
 
+RUNTIME_FUNCTION(Runtime_DeclareEvalVar) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+  return DeclareEvalHelper(isolate, name,
+                           isolate->factory()->undefined_value());
+}
 
 namespace {
 
@@ -563,7 +558,7 @@
   {
     DisallowHeapAllocation no_gc;
     FixedArray* elements = FixedArray::cast(result->elements());
-    WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
+    WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
     for (int i = 0; i < num_elements; i++) {
       elements->set(i, *arguments[i + start_index], mode);
     }
@@ -614,7 +609,11 @@
     ScriptContextTable::LookupResult lookup;
     if (ScriptContextTable::Lookup(script_context, name, &lookup)) {
       if (IsLexicalVariableMode(mode) || IsLexicalVariableMode(lookup.mode)) {
-        return ThrowRedeclarationError(isolate, name);
+        // ES#sec-globaldeclarationinstantiation 5.b:
+        // If envRec.HasLexicalDeclaration(name) is true, throw a SyntaxError
+        // exception.
+        return ThrowRedeclarationError(isolate, name,
+                                       RedeclarationType::kSyntaxError);
       }
     }
 
@@ -624,7 +623,13 @@
       Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
       if (!maybe.IsJust()) return isolate->heap()->exception();
       if ((maybe.FromJust() & DONT_DELETE) != 0) {
-        return ThrowRedeclarationError(isolate, name);
+        // ES#sec-globaldeclarationinstantiation 5.a:
+        // If envRec.HasVarDeclaration(name) is true, throw a SyntaxError
+        // exception.
+        // ES#sec-globaldeclarationinstantiation 5.d:
+        // If hasRestrictedGlobal is true, throw a SyntaxError exception.
+        return ThrowRedeclarationError(isolate, name,
+                                       RedeclarationType::kSyntaxError);
       }
 
       JSGlobalObject::InvalidatePropertyCell(global_object, name);
@@ -777,7 +782,7 @@
     Handle<JSModule> module(context->module());
 
     for (int j = 0; j < description->length(); ++j) {
-      Handle<String> name(description->name(j));
+      Handle<String> name(description->name(j), isolate);
       VariableMode mode = description->mode(j);
       int index = description->index(j);
       switch (mode) {
@@ -791,7 +796,7 @@
               Accessors::MakeModuleExport(name, index, attr);
           Handle<Object> result =
               JSObject::SetAccessor(module, info).ToHandleChecked();
-          DCHECK(!result->IsUndefined());
+          DCHECK(!result->IsUndefined(isolate));
           USE(result);
           break;
         }
@@ -870,14 +875,14 @@
     // Check for uninitialized bindings.
     switch (flags) {
       case BINDING_CHECK_INITIALIZED:
-        if (value->IsTheHole()) {
+        if (value->IsTheHole(isolate)) {
           THROW_NEW_ERROR(isolate,
                           NewReferenceError(MessageTemplate::kNotDefined, name),
                           Object);
         }
       // FALLTHROUGH
       case BINDING_IS_INITIALIZED:
-        DCHECK(!value->IsTheHole());
+        DCHECK(!value->IsTheHole(isolate));
         if (receiver_return) *receiver_return = receiver;
         return value;
       case MISSING_BINDING:
@@ -923,10 +928,8 @@
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
-  Handle<Object> value;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, value, LoadLookupSlot(name, Object::THROW_ON_ERROR));
-  return *value;
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           LoadLookupSlot(name, Object::THROW_ON_ERROR));
 }
 
 
@@ -934,10 +937,7 @@
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
-  Handle<Object> value;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, value, LoadLookupSlot(name, Object::DONT_THROW));
-  return *value;
+  RETURN_RESULT_OR_FAILURE(isolate, LoadLookupSlot(name, Object::DONT_THROW));
 }
 
 
@@ -1021,9 +1021,7 @@
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
-                                     StoreLookupSlot(name, value, SLOPPY));
-  return *value;
+  RETURN_RESULT_OR_FAILURE(isolate, StoreLookupSlot(name, value, SLOPPY));
 }
 
 
@@ -1032,9 +1030,7 @@
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
-                                     StoreLookupSlot(name, value, STRICT));
-  return *value;
+  RETURN_RESULT_OR_FAILURE(isolate, StoreLookupSlot(name, value, STRICT));
 }
 
 }  // namespace internal
diff --git a/src/runtime/runtime-simd.cc b/src/runtime/runtime-simd.cc
index 9e56142..70bc950 100644
--- a/src/runtime/runtime-simd.cc
+++ b/src/runtime/runtime-simd.cc
@@ -26,6 +26,7 @@
   // A float can't represent 2^31 - 1 or 2^32 - 1 exactly, so promote the limits
   // to double. Otherwise, the limit is truncated and numbers like 2^31 or 2^32
   // get through, causing any static_cast to be undefined.
+  from = trunc(from);
   return from >= static_cast<double>(std::numeric_limits<T>::min()) &&
          from <= static_cast<double>(std::numeric_limits<T>::max());
 }
@@ -168,9 +169,19 @@
 
 // Utility macros.
 
-#define CONVERT_SIMD_LANE_ARG_CHECKED(name, index, lanes) \
-  CONVERT_INT32_ARG_CHECKED(name, index);                 \
-  RUNTIME_ASSERT(name >= 0 && name < lanes);
+// TODO(gdeepti): Fix to use ToNumber conversion once polyfill is updated.
+#define CONVERT_SIMD_LANE_ARG_CHECKED(name, index, lanes)            \
+  Handle<Object> name_object = args.at<Object>(index);               \
+  if (!name_object->IsNumber()) {                                    \
+    THROW_NEW_ERROR_RETURN_FAILURE(                                  \
+        isolate, NewTypeError(MessageTemplate::kInvalidSimdIndex));  \
+  }                                                                  \
+  double number = name_object->Number();                             \
+  if (number < 0 || number >= lanes || !IsInt32Double(number)) {     \
+    THROW_NEW_ERROR_RETURN_FAILURE(                                  \
+        isolate, NewRangeError(MessageTemplate::kInvalidSimdIndex)); \
+  }                                                                  \
+  uint32_t name = static_cast<uint32_t>(number);
 
 #define CONVERT_SIMD_ARG_HANDLE_THROW(Type, name, index)                \
   Handle<Type> name;                                                    \
@@ -217,8 +228,10 @@
 
 // Common functions.
 
-#define GET_NUMERIC_ARG(lane_type, name, index) \
-  CONVERT_NUMBER_ARG_HANDLE_CHECKED(a, index);  \
+#define GET_NUMERIC_ARG(lane_type, name, index)              \
+  Handle<Object> a;                                          \
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(                        \
+      isolate, a, Object::ToNumber(args.at<Object>(index))); \
   name = ConvertNumber<lane_type>(a->Number());
 
 #define GET_BOOLEAN_ARG(lane_type, name, index) \
@@ -395,10 +408,14 @@
   FUNCTION(Uint16x8, uint16_t, 16, 8) \
   FUNCTION(Uint8x16, uint8_t, 8, 16)
 
-#define CONVERT_SHIFT_ARG_CHECKED(name, index)         \
-  RUNTIME_ASSERT(args[index]->IsNumber());             \
-  int32_t signed_shift = 0;                            \
-  RUNTIME_ASSERT(args[index]->ToInt32(&signed_shift)); \
+#define CONVERT_SHIFT_ARG_CHECKED(name, index)                          \
+  Handle<Object> name_object = args.at<Object>(index);                  \
+  if (!name_object->IsNumber()) {                                       \
+    THROW_NEW_ERROR_RETURN_FAILURE(                                     \
+        isolate, NewTypeError(MessageTemplate::kInvalidSimdOperation)); \
+  }                                                                     \
+  int32_t signed_shift = 0;                                             \
+  args[index]->ToInt32(&signed_shift);                                  \
   uint32_t name = bit_cast<uint32_t>(signed_shift);
 
 #define SIMD_LSL_FUNCTION(type, lane_type, lane_bits, lane_count) \
@@ -409,31 +426,29 @@
     CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0);                    \
     CONVERT_SHIFT_ARG_CHECKED(shift, 1);                          \
     lane_type lanes[kLaneCount] = {0};                            \
-    if (shift < lane_bits) {                                      \
-      for (int i = 0; i < kLaneCount; i++) {                      \
-        lanes[i] = a->get_lane(i) << shift;                       \
-      }                                                           \
+    shift &= lane_bits - 1;                                       \
+    for (int i = 0; i < kLaneCount; i++) {                        \
+      lanes[i] = a->get_lane(i) << shift;                         \
     }                                                             \
     Handle<type> result = isolate->factory()->New##type(lanes);   \
     return *result;                                               \
   }
 
-#define SIMD_LSR_FUNCTION(type, lane_type, lane_bits, lane_count) \
-  RUNTIME_FUNCTION(Runtime_##type##ShiftRightByScalar) {          \
-    static const int kLaneCount = lane_count;                     \
-    HandleScope scope(isolate);                                   \
-    DCHECK(args.length() == 2);                                   \
-    CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0);                    \
-    CONVERT_SHIFT_ARG_CHECKED(shift, 1);                          \
-    lane_type lanes[kLaneCount] = {0};                            \
-    if (shift < lane_bits) {                                      \
-      for (int i = 0; i < kLaneCount; i++) {                      \
-        lanes[i] = static_cast<lane_type>(                        \
-            bit_cast<lane_type>(a->get_lane(i)) >> shift);        \
-      }                                                           \
-    }                                                             \
-    Handle<type> result = isolate->factory()->New##type(lanes);   \
-    return *result;                                               \
+#define SIMD_LSR_FUNCTION(type, lane_type, lane_bits, lane_count)              \
+  RUNTIME_FUNCTION(Runtime_##type##ShiftRightByScalar) {                       \
+    static const int kLaneCount = lane_count;                                  \
+    HandleScope scope(isolate);                                                \
+    DCHECK(args.length() == 2);                                                \
+    CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0);                                 \
+    CONVERT_SHIFT_ARG_CHECKED(shift, 1);                                       \
+    lane_type lanes[kLaneCount] = {0};                                         \
+    shift &= lane_bits - 1;                                                    \
+    for (int i = 0; i < kLaneCount; i++) {                                     \
+      lanes[i] = static_cast<lane_type>(bit_cast<lane_type>(a->get_lane(i)) >> \
+                                        shift);                                \
+    }                                                                          \
+    Handle<type> result = isolate->factory()->New##type(lanes);                \
+    return *result;                                                            \
   }
 
 #define SIMD_ASR_FUNCTION(type, lane_type, lane_bits, lane_count)      \
@@ -443,7 +458,7 @@
     DCHECK(args.length() == 2);                                        \
     CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0);                         \
     CONVERT_SHIFT_ARG_CHECKED(shift, 1);                               \
-    if (shift >= lane_bits) shift = lane_bits - 1;                     \
+    shift &= lane_bits - 1;                                            \
     lane_type lanes[kLaneCount];                                       \
     for (int i = 0; i < kLaneCount; i++) {                             \
       int64_t shifted = static_cast<int64_t>(a->get_lane(i)) >> shift; \
@@ -785,8 +800,10 @@
     lane_type lanes[kLaneCount];                                               \
     for (int i = 0; i < kLaneCount; i++) {                                     \
       from_ctype a_value = a->get_lane(i);                                     \
-      if (a_value != a_value) a_value = 0;                                     \
-      RUNTIME_ASSERT(CanCast<lane_type>(a_value));                             \
+      if (a_value != a_value || !CanCast<lane_type>(a_value)) {                \
+        THROW_NEW_ERROR_RETURN_FAILURE(                                        \
+            isolate, NewRangeError(MessageTemplate::kInvalidSimdLaneValue));   \
+      }                                                                        \
       lanes[i] = static_cast<lane_type>(a_value);                              \
     }                                                                          \
     Handle<type> result = isolate->factory()->New##type(lanes);                \
@@ -863,6 +880,17 @@
   FUNCTION(Int32x4, int32_t, 4)           \
   FUNCTION(Uint32x4, uint32_t, 4)
 
+#define SIMD_COERCE_INDEX(name, i)                                            \
+  Handle<Object> length_object, number_object;                                \
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(                                         \
+      isolate, length_object, Object::ToLength(isolate, args.at<Object>(i))); \
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_object,                  \
+                                     Object::ToNumber(args.at<Object>(i)));   \
+  if (number_object->Number() != length_object->Number()) {                   \
+    THROW_NEW_ERROR_RETURN_FAILURE(                                           \
+        isolate, NewTypeError(MessageTemplate::kInvalidSimdIndex));           \
+  }                                                                           \
+  int32_t name = number_object->Number();
 
 // Common Load and Store Functions
 
@@ -870,11 +898,14 @@
   static const int kLaneCount = lane_count;                            \
   DCHECK(args.length() == 2);                                          \
   CONVERT_SIMD_ARG_HANDLE_THROW(JSTypedArray, tarray, 0);              \
-  CONVERT_INT32_ARG_CHECKED(index, 1)                                  \
+  SIMD_COERCE_INDEX(index, 1);                                         \
   size_t bpe = tarray->element_size();                                 \
   uint32_t bytes = count * sizeof(lane_type);                          \
   size_t byte_length = NumberToSize(isolate, tarray->byte_length());   \
-  RUNTIME_ASSERT(index >= 0 && index * bpe + bytes <= byte_length);    \
+  if (index < 0 || index * bpe + bytes > byte_length) {                \
+    THROW_NEW_ERROR_RETURN_FAILURE(                                    \
+        isolate, NewRangeError(MessageTemplate::kInvalidSimdIndex));   \
+  }                                                                    \
   size_t tarray_offset = NumberToSize(isolate, tarray->byte_offset()); \
   uint8_t* tarray_base =                                               \
       static_cast<uint8_t*>(tarray->GetBuffer()->backing_store()) +    \
@@ -883,17 +914,19 @@
   memcpy(lanes, tarray_base + index * bpe, bytes);                     \
   Handle<type> result = isolate->factory()->New##type(lanes);
 
-
 #define SIMD_STORE(type, lane_type, lane_count, count, a)              \
   static const int kLaneCount = lane_count;                            \
   DCHECK(args.length() == 3);                                          \
   CONVERT_SIMD_ARG_HANDLE_THROW(JSTypedArray, tarray, 0);              \
   CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 2);                           \
-  CONVERT_INT32_ARG_CHECKED(index, 1)                                  \
+  SIMD_COERCE_INDEX(index, 1);                                         \
   size_t bpe = tarray->element_size();                                 \
   uint32_t bytes = count * sizeof(lane_type);                          \
   size_t byte_length = NumberToSize(isolate, tarray->byte_length());   \
-  RUNTIME_ASSERT(index >= 0 && index * bpe + bytes <= byte_length);    \
+  if (index < 0 || byte_length < index * bpe + bytes) {                \
+    THROW_NEW_ERROR_RETURN_FAILURE(                                    \
+        isolate, NewRangeError(MessageTemplate::kInvalidSimdIndex));   \
+  }                                                                    \
   size_t tarray_offset = NumberToSize(isolate, tarray->byte_offset()); \
   uint8_t* tarray_base =                                               \
       static_cast<uint8_t*>(tarray->GetBuffer()->backing_store()) +    \
@@ -904,7 +937,6 @@
   }                                                                    \
   memcpy(tarray_base + index * bpe, lanes, bytes);
 
-
 #define SIMD_LOAD_FUNCTION(type, lane_type, lane_count)         \
   RUNTIME_FUNCTION(Runtime_##type##Load) {                      \
     HandleScope scope(isolate);                                 \
diff --git a/src/runtime/runtime-strings.cc b/src/runtime/runtime-strings.cc
index 0f19bf3..c1f14ad 100644
--- a/src/runtime/runtime-strings.cc
+++ b/src/runtime/runtime-strings.cc
@@ -140,7 +140,7 @@
   uint32_t start_index = 0;
   if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
 
-  RUNTIME_ASSERT(start_index <= static_cast<uint32_t>(sub->length()));
+  CHECK(start_index <= static_cast<uint32_t>(sub->length()));
   int position = StringMatch(isolate, sub, pat, start_index);
   return Smi::FromInt(position);
 }
@@ -313,16 +313,14 @@
   CONVERT_ARG_HANDLE_CHECKED(String, str1, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, str2, 1);
   isolate->counters()->string_add_runtime()->Increment();
-  Handle<String> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, isolate->factory()->NewConsString(str1, str2));
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           isolate->factory()->NewConsString(str1, str2));
 }
 
 
 RUNTIME_FUNCTION(Runtime_InternalizeString) {
   HandleScope handles(isolate);
-  RUNTIME_ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
   return *isolate->factory()->InternalizeString(string);
 }
@@ -336,7 +334,7 @@
   CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, regexp_info, 2);
 
-  RUNTIME_ASSERT(regexp_info->HasFastObjectElements());
+  CHECK(regexp_info->HasFastObjectElements());
 
   RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
   if (global_cache.HasException()) return isolate->heap()->exception();
@@ -433,15 +431,14 @@
   CONVERT_ARG_HANDLE_CHECKED(String, special, 2);
 
   size_t actual_array_length = 0;
-  RUNTIME_ASSERT(
-      TryNumberToSize(isolate, array->length(), &actual_array_length));
-  RUNTIME_ASSERT(array_length >= 0);
-  RUNTIME_ASSERT(static_cast<size_t>(array_length) <= actual_array_length);
+  CHECK(TryNumberToSize(isolate, array->length(), &actual_array_length));
+  CHECK(array_length >= 0);
+  CHECK(static_cast<size_t>(array_length) <= actual_array_length);
 
   // This assumption is used by the slice encoding in one or two smis.
   DCHECK(Smi::kMaxValue >= String::kMaxLength);
 
-  RUNTIME_ASSERT(array->HasFastElements());
+  CHECK(array->HasFastElements());
   JSObject::EnsureCanContainHeapObjectElements(array);
 
   int special_length = special->length();
@@ -502,8 +499,8 @@
     THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
   }
   CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
-  RUNTIME_ASSERT(array->HasFastObjectElements());
-  RUNTIME_ASSERT(array_length >= 0);
+  CHECK(array->HasFastObjectElements());
+  CHECK(array_length >= 0);
 
   Handle<FixedArray> fixed_array(FixedArray::cast(array->elements()));
   if (fixed_array->length() < array_length) {
@@ -514,12 +511,12 @@
     return isolate->heap()->empty_string();
   } else if (array_length == 1) {
     Object* first = fixed_array->get(0);
-    RUNTIME_ASSERT(first->IsString());
+    CHECK(first->IsString());
     return first;
   }
 
   int separator_length = separator->length();
-  RUNTIME_ASSERT(separator_length > 0);
+  CHECK(separator_length > 0);
   int max_nof_separators =
       (String::kMaxLength + separator_length - 1) / separator_length;
   if (max_nof_separators < (array_length - 1)) {
@@ -528,7 +525,7 @@
   int length = (array_length - 1) * separator_length;
   for (int i = 0; i < array_length; i++) {
     Object* element_obj = fixed_array->get(i);
-    RUNTIME_ASSERT(element_obj->IsString());
+    CHECK(element_obj->IsString());
     String* element = String::cast(element_obj);
     int increment = element->length();
     if (increment > String::kMaxLength - length) {
@@ -550,7 +547,7 @@
   uc16* end = sink + length;
 #endif
 
-  RUNTIME_ASSERT(fixed_array->get(0)->IsString());
+  CHECK(fixed_array->get(0)->IsString());
   String* first = String::cast(fixed_array->get(0));
   String* separator_raw = *separator;
 
@@ -563,7 +560,7 @@
     String::WriteToFlat(separator_raw, sink, 0, separator_length);
     sink += separator_length;
 
-    RUNTIME_ASSERT(fixed_array->get(i)->IsString());
+    CHECK(fixed_array->get(i)->IsString());
     String* element = String::cast(fixed_array->get(i));
     int element_length = element->length();
     DCHECK(sink + element_length <= end);
@@ -642,18 +639,18 @@
   CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
   // elements_array is fast-mode JSarray of alternating positions
   // (increasing order) and strings.
-  RUNTIME_ASSERT(elements_array->HasFastSmiOrObjectElements());
+  CHECK(elements_array->HasFastSmiOrObjectElements());
   // array_length is length of original array (used to add separators);
   // separator is string to put between elements. Assumed to be non-empty.
-  RUNTIME_ASSERT(array_length > 0);
+  CHECK(array_length > 0);
 
   // Find total length of join result.
   int string_length = 0;
   bool is_one_byte = separator->IsOneByteRepresentation();
   bool overflow = false;
   CONVERT_NUMBER_CHECKED(int, elements_length, Int32, elements_array->length());
-  RUNTIME_ASSERT(elements_length <= elements_array->elements()->length());
-  RUNTIME_ASSERT((elements_length & 1) == 0);  // Even length.
+  CHECK(elements_length <= elements_array->elements()->length());
+  CHECK((elements_length & 1) == 0);  // Even length.
   FixedArray* elements = FixedArray::cast(elements_array->elements());
   {
     DisallowHeapAllocation no_gc;
@@ -1059,7 +1056,7 @@
   }
 
   Object* answer = ConvertCaseHelper(isolate, *s, *result, length, mapping);
-  if (answer->IsException() || answer->IsString()) return answer;
+  if (answer->IsException(isolate) || answer->IsString()) return answer;
 
   DCHECK(answer->IsSmi());
   length = Smi::cast(answer)->value();
@@ -1090,68 +1087,6 @@
   return ConvertCase(s, isolate, isolate->runtime_state()->to_upper_mapping());
 }
 
-
-RUNTIME_FUNCTION(Runtime_StringTrim) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
-
-  CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
-  CONVERT_BOOLEAN_ARG_CHECKED(trimLeft, 1);
-  CONVERT_BOOLEAN_ARG_CHECKED(trimRight, 2);
-
-  string = String::Flatten(string);
-  int length = string->length();
-
-  int left = 0;
-  UnicodeCache* unicode_cache = isolate->unicode_cache();
-  if (trimLeft) {
-    while (left < length &&
-           unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(left))) {
-      left++;
-    }
-  }
-
-  int right = length;
-  if (trimRight) {
-    while (
-        right > left &&
-        unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(right - 1))) {
-      right--;
-    }
-  }
-
-  return *isolate->factory()->NewSubString(string, left, right);
-}
-
-
-RUNTIME_FUNCTION(Runtime_TruncateString) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(SeqString, string, 0);
-  CONVERT_INT32_ARG_CHECKED(new_length, 1);
-  RUNTIME_ASSERT(new_length >= 0);
-  return *SeqString::Truncate(string, new_length);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NewString) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_INT32_ARG_CHECKED(length, 0);
-  CONVERT_BOOLEAN_ARG_CHECKED(is_one_byte, 1);
-  if (length == 0) return isolate->heap()->empty_string();
-  Handle<String> result;
-  if (is_one_byte) {
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result, isolate->factory()->NewRawOneByteString(length));
-  } else {
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result, isolate->factory()->NewRawTwoByteString(length));
-  }
-  return *result;
-}
-
-
 RUNTIME_FUNCTION(Runtime_StringLessThan) {
   HandleScope handle_scope(isolate);
   DCHECK_EQ(2, args.length());
@@ -1259,18 +1194,6 @@
   return isolate->heap()->empty_string();
 }
 
-
-RUNTIME_FUNCTION(Runtime_StringCharAt) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 2);
-  if (!args[0]->IsString()) return Smi::FromInt(0);
-  if (!args[1]->IsNumber()) return Smi::FromInt(0);
-  if (std::isinf(args.number_at(1))) return isolate->heap()->empty_string();
-  Object* code = __RT_impl_Runtime_StringCharCodeAtRT(args, isolate);
-  if (code->IsNaN()) return isolate->heap()->empty_string();
-  return __RT_impl_Runtime_StringCharFromCode(Arguments(1, &code), isolate);
-}
-
 RUNTIME_FUNCTION(Runtime_ExternalStringGetChar) {
   SealHandleScope shs(isolate);
   DCHECK_EQ(2, args.length());
@@ -1279,46 +1202,6 @@
   return Smi::FromInt(string->Get(index));
 }
 
-RUNTIME_FUNCTION(Runtime_OneByteSeqStringGetChar) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_CHECKED(SeqOneByteString, string, 0);
-  CONVERT_INT32_ARG_CHECKED(index, 1);
-  return Smi::FromInt(string->SeqOneByteStringGet(index));
-}
-
-
-RUNTIME_FUNCTION(Runtime_OneByteSeqStringSetChar) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 3);
-  CONVERT_INT32_ARG_CHECKED(index, 0);
-  CONVERT_INT32_ARG_CHECKED(value, 1);
-  CONVERT_ARG_CHECKED(SeqOneByteString, string, 2);
-  string->SeqOneByteStringSet(index, value);
-  return string;
-}
-
-
-RUNTIME_FUNCTION(Runtime_TwoByteSeqStringGetChar) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_CHECKED(SeqTwoByteString, string, 0);
-  CONVERT_INT32_ARG_CHECKED(index, 1);
-  return Smi::FromInt(string->SeqTwoByteStringGet(index));
-}
-
-
-RUNTIME_FUNCTION(Runtime_TwoByteSeqStringSetChar) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 3);
-  CONVERT_INT32_ARG_CHECKED(index, 0);
-  CONVERT_INT32_ARG_CHECKED(value, 1);
-  CONVERT_ARG_CHECKED(SeqTwoByteString, string, 2);
-  string->SeqTwoByteStringSet(index, value);
-  return string;
-}
-
-
 RUNTIME_FUNCTION(Runtime_StringCharCodeAt) {
   SealHandleScope shs(isolate);
   DCHECK(args.length() == 2);
diff --git a/src/runtime/runtime-symbol.cc b/src/runtime/runtime-symbol.cc
index 234b456..300a643 100644
--- a/src/runtime/runtime-symbol.cc
+++ b/src/runtime/runtime-symbol.cc
@@ -16,7 +16,7 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
-  RUNTIME_ASSERT(name->IsString() || name->IsUndefined());
+  CHECK(name->IsString() || name->IsUndefined(isolate));
   Handle<Symbol> symbol = isolate->factory()->NewSymbol();
   if (name->IsString()) symbol->set_name(*name);
   return *symbol;
@@ -27,7 +27,7 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
-  RUNTIME_ASSERT(name->IsString() || name->IsUndefined());
+  CHECK(name->IsString() || name->IsUndefined(isolate));
   Handle<Symbol> symbol = isolate->factory()->NewPrivateSymbol();
   if (name->IsString()) symbol->set_name(*name);
   return *symbol;
@@ -52,9 +52,7 @@
     builder.AppendString(handle(String::cast(symbol->name()), isolate));
   }
   builder.AppendCharacter(')');
-  Handle<String> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, builder.Finish());
-  return *result;
+  RETURN_RESULT_OR_FAILURE(isolate, builder.Finish());
 }
 
 
diff --git a/src/runtime/runtime-test.cc b/src/runtime/runtime-test.cc
index cc15d0e..9ec29b9 100644
--- a/src/runtime/runtime-test.cc
+++ b/src/runtime/runtime-test.cc
@@ -14,6 +14,15 @@
 namespace v8 {
 namespace internal {
 
+RUNTIME_FUNCTION(Runtime_ConstructDouble) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_NUMBER_CHECKED(uint32_t, hi, Uint32, args[0]);
+  CONVERT_NUMBER_CHECKED(uint32_t, lo, Uint32, args[1]);
+  uint64_t result = (static_cast<uint64_t>(hi) << 32) | lo;
+  return *isolate->factory()->NewNumber(uint64_to_double(result));
+}
+
 RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
@@ -258,7 +267,7 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-  function->shared()->ClearTypeFeedbackInfo();
+  function->ClearTypeFeedbackInfo();
   Code* unoptimized = function->shared()->code();
   if (unoptimized->kind() == Code::FUNCTION) {
     unoptimized->ClearInlineCaches();
diff --git a/src/runtime/runtime-typedarray.cc b/src/runtime/runtime-typedarray.cc
index 14b1207..37b612d 100644
--- a/src/runtime/runtime-typedarray.cc
+++ b/src/runtime/runtime-typedarray.cc
@@ -36,17 +36,17 @@
                                   "ArrayBuffer.prototype.slice")));
   }
 
-  RUNTIME_ASSERT(!source.is_identical_to(target));
+  CHECK(!source.is_identical_to(target));
   size_t start = 0, target_length = 0;
-  RUNTIME_ASSERT(TryNumberToSize(isolate, *first, &start));
-  RUNTIME_ASSERT(TryNumberToSize(isolate, *new_length, &target_length));
-  RUNTIME_ASSERT(NumberToSize(isolate, target->byte_length()) >= target_length);
+  CHECK(TryNumberToSize(isolate, *first, &start));
+  CHECK(TryNumberToSize(isolate, *new_length, &target_length));
+  CHECK(NumberToSize(isolate, target->byte_length()) >= target_length);
 
   if (target_length == 0) return isolate->heap()->undefined_value();
 
   size_t source_byte_length = NumberToSize(isolate, source->byte_length());
-  RUNTIME_ASSERT(start <= source_byte_length);
-  RUNTIME_ASSERT(source_byte_length - start >= target_length);
+  CHECK(start <= source_byte_length);
+  CHECK(source_byte_length - start >= target_length);
   uint8_t* source_data = reinterpret_cast<uint8_t*>(source->backing_store());
   uint8_t* target_data = reinterpret_cast<uint8_t*>(target->backing_store());
   CopyBytes(target_data, source_data + start, target_length);
@@ -63,7 +63,7 @@
     return isolate->heap()->undefined_value();
   }
   // Shared array buffers should never be neutered.
-  RUNTIME_ASSERT(!array_buffer->is_shared());
+  CHECK(!array_buffer->is_shared());
   DCHECK(!array_buffer->is_external());
   void* backing_store = array_buffer->backing_store();
   size_t byte_length = NumberToSize(isolate, array_buffer->byte_length());
@@ -105,32 +105,32 @@
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_length_object, 4);
   CONVERT_BOOLEAN_ARG_CHECKED(initialize, 5);
 
-  RUNTIME_ASSERT(arrayId >= Runtime::ARRAY_ID_FIRST &&
-                 arrayId <= Runtime::ARRAY_ID_LAST);
+  CHECK(arrayId >= Runtime::ARRAY_ID_FIRST &&
+        arrayId <= Runtime::ARRAY_ID_LAST);
 
   ExternalArrayType array_type = kExternalInt8Array;  // Bogus initialization.
   size_t element_size = 1;                            // Bogus initialization.
   ElementsKind fixed_elements_kind = INT8_ELEMENTS;  // Bogus initialization.
   Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &fixed_elements_kind,
                                 &element_size);
-  RUNTIME_ASSERT(holder->map()->elements_kind() == fixed_elements_kind);
+  CHECK(holder->map()->elements_kind() == fixed_elements_kind);
 
   size_t byte_offset = 0;
   size_t byte_length = 0;
-  RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_offset_object, &byte_offset));
-  RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_length_object, &byte_length));
+  CHECK(TryNumberToSize(isolate, *byte_offset_object, &byte_offset));
+  CHECK(TryNumberToSize(isolate, *byte_length_object, &byte_length));
 
   if (maybe_buffer->IsJSArrayBuffer()) {
     Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(maybe_buffer);
     size_t array_buffer_byte_length =
         NumberToSize(isolate, buffer->byte_length());
-    RUNTIME_ASSERT(byte_offset <= array_buffer_byte_length);
-    RUNTIME_ASSERT(array_buffer_byte_length - byte_offset >= byte_length);
+    CHECK(byte_offset <= array_buffer_byte_length);
+    CHECK(array_buffer_byte_length - byte_offset >= byte_length);
   } else {
-    RUNTIME_ASSERT(maybe_buffer->IsNull());
+    CHECK(maybe_buffer->IsNull(isolate));
   }
 
-  RUNTIME_ASSERT(byte_length % element_size == 0);
+  CHECK(byte_length % element_size == 0);
   size_t length = byte_length / element_size;
 
   if (length > static_cast<unsigned>(Smi::kMaxValue)) {
@@ -150,7 +150,7 @@
   holder->set_byte_offset(*byte_offset_object);
   holder->set_byte_length(*byte_length_object);
 
-  if (!maybe_buffer->IsNull()) {
+  if (!maybe_buffer->IsNull(isolate)) {
     Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(maybe_buffer);
     holder->set_buffer(*buffer);
 
@@ -186,8 +186,8 @@
   CONVERT_ARG_HANDLE_CHECKED(Object, source, 2);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(length_obj, 3);
 
-  RUNTIME_ASSERT(arrayId >= Runtime::ARRAY_ID_FIRST &&
-                 arrayId <= Runtime::ARRAY_ID_LAST);
+  CHECK(arrayId >= Runtime::ARRAY_ID_FIRST &&
+        arrayId <= Runtime::ARRAY_ID_LAST);
 
   ExternalArrayType array_type = kExternalInt8Array;  // Bogus initialization.
   size_t element_size = 1;                            // Bogus initialization.
@@ -195,7 +195,7 @@
   Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &fixed_elements_kind,
                                 &element_size);
 
-  RUNTIME_ASSERT(holder->map()->elements_kind() == fixed_elements_kind);
+  CHECK(holder->map()->elements_kind() == fixed_elements_kind);
 
   Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
   size_t length = 0;
@@ -204,7 +204,7 @@
     length_obj = handle(JSTypedArray::cast(*source)->length(), isolate);
     length = JSTypedArray::cast(*source)->length_value();
   } else {
-    RUNTIME_ASSERT(TryNumberToSize(isolate, *length_obj, &length));
+    CHECK(TryNumberToSize(isolate, *length_obj, &length));
   }
 
   if ((length > static_cast<unsigned>(Smi::kMaxValue)) ||
@@ -284,7 +284,6 @@
 BUFFER_VIEW_GETTER(ArrayBufferView, ByteLength, byte_length)
 BUFFER_VIEW_GETTER(ArrayBufferView, ByteOffset, byte_offset)
 BUFFER_VIEW_GETTER(TypedArray, Length, length)
-BUFFER_VIEW_GETTER(DataView, Buffer, buffer)
 
 #undef BUFFER_VIEW_GETTER
 
@@ -329,7 +328,7 @@
   Handle<JSTypedArray> target(JSTypedArray::cast(*target_obj));
   Handle<JSTypedArray> source(JSTypedArray::cast(*source_obj));
   size_t offset = 0;
-  RUNTIME_ASSERT(TryNumberToSize(isolate, *offset_obj, &offset));
+  CHECK(TryNumberToSize(isolate, *offset_obj, &offset));
   size_t target_length = target->length_value();
   size_t source_length = source->length_value();
   size_t target_byte_length = NumberToSize(isolate, target->byte_length());
diff --git a/src/runtime/runtime-uri.cc b/src/runtime/runtime-uri.cc
deleted file mode 100644
index e64e9dc..0000000
--- a/src/runtime/runtime-uri.cc
+++ /dev/null
@@ -1,293 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
-#include "src/conversions.h"
-#include "src/isolate-inl.h"
-#include "src/objects-inl.h"
-#include "src/string-search.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-class URIUnescape : public AllStatic {
- public:
-  template <typename Char>
-  MUST_USE_RESULT static MaybeHandle<String> Unescape(Isolate* isolate,
-                                                      Handle<String> source);
-
- private:
-  static const signed char kHexValue['g'];
-
-  template <typename Char>
-  MUST_USE_RESULT static MaybeHandle<String> UnescapeSlow(Isolate* isolate,
-                                                          Handle<String> string,
-                                                          int start_index);
-
-  static INLINE(int TwoDigitHex(uint16_t character1, uint16_t character2));
-
-  template <typename Char>
-  static INLINE(int UnescapeChar(Vector<const Char> vector, int i, int length,
-                                 int* step));
-};
-
-
-const signed char URIUnescape::kHexValue[] = {
-    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -0, 1,  2,  3,  4,  5,
-    6,  7,  8,  9,  -1, -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1,
-    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-    -1, -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15};
-
-
-template <typename Char>
-MaybeHandle<String> URIUnescape::Unescape(Isolate* isolate,
-                                          Handle<String> source) {
-  int index;
-  {
-    DisallowHeapAllocation no_allocation;
-    StringSearch<uint8_t, Char> search(isolate, STATIC_CHAR_VECTOR("%"));
-    index = search.Search(source->GetCharVector<Char>(), 0);
-    if (index < 0) return source;
-  }
-  return UnescapeSlow<Char>(isolate, source, index);
-}
-
-
-template <typename Char>
-MaybeHandle<String> URIUnescape::UnescapeSlow(Isolate* isolate,
-                                              Handle<String> string,
-                                              int start_index) {
-  bool one_byte = true;
-  int length = string->length();
-
-  int unescaped_length = 0;
-  {
-    DisallowHeapAllocation no_allocation;
-    Vector<const Char> vector = string->GetCharVector<Char>();
-    for (int i = start_index; i < length; unescaped_length++) {
-      int step;
-      if (UnescapeChar(vector, i, length, &step) >
-          String::kMaxOneByteCharCode) {
-        one_byte = false;
-      }
-      i += step;
-    }
-  }
-
-  DCHECK(start_index < length);
-  Handle<String> first_part =
-      isolate->factory()->NewProperSubString(string, 0, start_index);
-
-  int dest_position = 0;
-  Handle<String> second_part;
-  DCHECK(unescaped_length <= String::kMaxLength);
-  if (one_byte) {
-    Handle<SeqOneByteString> dest = isolate->factory()
-                                        ->NewRawOneByteString(unescaped_length)
-                                        .ToHandleChecked();
-    DisallowHeapAllocation no_allocation;
-    Vector<const Char> vector = string->GetCharVector<Char>();
-    for (int i = start_index; i < length; dest_position++) {
-      int step;
-      dest->SeqOneByteStringSet(dest_position,
-                                UnescapeChar(vector, i, length, &step));
-      i += step;
-    }
-    second_part = dest;
-  } else {
-    Handle<SeqTwoByteString> dest = isolate->factory()
-                                        ->NewRawTwoByteString(unescaped_length)
-                                        .ToHandleChecked();
-    DisallowHeapAllocation no_allocation;
-    Vector<const Char> vector = string->GetCharVector<Char>();
-    for (int i = start_index; i < length; dest_position++) {
-      int step;
-      dest->SeqTwoByteStringSet(dest_position,
-                                UnescapeChar(vector, i, length, &step));
-      i += step;
-    }
-    second_part = dest;
-  }
-  return isolate->factory()->NewConsString(first_part, second_part);
-}
-
-
-int URIUnescape::TwoDigitHex(uint16_t character1, uint16_t character2) {
-  if (character1 > 'f') return -1;
-  int hi = kHexValue[character1];
-  if (hi == -1) return -1;
-  if (character2 > 'f') return -1;
-  int lo = kHexValue[character2];
-  if (lo == -1) return -1;
-  return (hi << 4) + lo;
-}
-
-
-template <typename Char>
-int URIUnescape::UnescapeChar(Vector<const Char> vector, int i, int length,
-                              int* step) {
-  uint16_t character = vector[i];
-  int32_t hi = 0;
-  int32_t lo = 0;
-  if (character == '%' && i <= length - 6 && vector[i + 1] == 'u' &&
-      (hi = TwoDigitHex(vector[i + 2], vector[i + 3])) != -1 &&
-      (lo = TwoDigitHex(vector[i + 4], vector[i + 5])) != -1) {
-    *step = 6;
-    return (hi << 8) + lo;
-  } else if (character == '%' && i <= length - 3 &&
-             (lo = TwoDigitHex(vector[i + 1], vector[i + 2])) != -1) {
-    *step = 3;
-    return lo;
-  } else {
-    *step = 1;
-    return character;
-  }
-}
-
-
-class URIEscape : public AllStatic {
- public:
-  template <typename Char>
-  MUST_USE_RESULT static MaybeHandle<String> Escape(Isolate* isolate,
-                                                    Handle<String> string);
-
- private:
-  static const char kHexChars[17];
-  static const char kNotEscaped[256];
-
-  static bool IsNotEscaped(uint16_t c) { return kNotEscaped[c] != 0; }
-};
-
-
-const char URIEscape::kHexChars[] = "0123456789ABCDEF";
-
-
-// kNotEscaped is generated by the following:
-//
-// #!/bin/perl
-// for (my $i = 0; $i < 256; $i++) {
-//   print "\n" if $i % 16 == 0;
-//   my $c = chr($i);
-//   my $escaped = 1;
-//   $escaped = 0 if $c =~ m#[A-Za-z0-9@*_+./-]#;
-//   print $escaped ? "0, " : "1, ";
-// }
-
-const char URIEscape::kNotEscaped[] = {
-    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1,
-    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
-    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
-    0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-    1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-
-
-template <typename Char>
-MaybeHandle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
-  DCHECK(string->IsFlat());
-  int escaped_length = 0;
-  int length = string->length();
-
-  {
-    DisallowHeapAllocation no_allocation;
-    Vector<const Char> vector = string->GetCharVector<Char>();
-    for (int i = 0; i < length; i++) {
-      uint16_t c = vector[i];
-      if (c >= 256) {
-        escaped_length += 6;
-      } else if (IsNotEscaped(c)) {
-        escaped_length++;
-      } else {
-        escaped_length += 3;
-      }
-
-      // We don't allow strings that are longer than a maximal length.
-      DCHECK(String::kMaxLength < 0x7fffffff - 6);     // Cannot overflow.
-      if (escaped_length > String::kMaxLength) break;  // Provoke exception.
-    }
-  }
-
-  // No length change implies no change.  Return original string if no change.
-  if (escaped_length == length) return string;
-
-  Handle<SeqOneByteString> dest;
-  ASSIGN_RETURN_ON_EXCEPTION(
-      isolate, dest, isolate->factory()->NewRawOneByteString(escaped_length),
-      String);
-  int dest_position = 0;
-
-  {
-    DisallowHeapAllocation no_allocation;
-    Vector<const Char> vector = string->GetCharVector<Char>();
-    for (int i = 0; i < length; i++) {
-      uint16_t c = vector[i];
-      if (c >= 256) {
-        dest->SeqOneByteStringSet(dest_position, '%');
-        dest->SeqOneByteStringSet(dest_position + 1, 'u');
-        dest->SeqOneByteStringSet(dest_position + 2, kHexChars[c >> 12]);
-        dest->SeqOneByteStringSet(dest_position + 3, kHexChars[(c >> 8) & 0xf]);
-        dest->SeqOneByteStringSet(dest_position + 4, kHexChars[(c >> 4) & 0xf]);
-        dest->SeqOneByteStringSet(dest_position + 5, kHexChars[c & 0xf]);
-        dest_position += 6;
-      } else if (IsNotEscaped(c)) {
-        dest->SeqOneByteStringSet(dest_position, c);
-        dest_position++;
-      } else {
-        dest->SeqOneByteStringSet(dest_position, '%');
-        dest->SeqOneByteStringSet(dest_position + 1, kHexChars[c >> 4]);
-        dest->SeqOneByteStringSet(dest_position + 2, kHexChars[c & 0xf]);
-        dest_position += 3;
-      }
-    }
-  }
-
-  return dest;
-}
-
-
-RUNTIME_FUNCTION(Runtime_URIEscape) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(1, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
-  Handle<String> source;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, source,
-                                     Object::ToString(isolate, input));
-  source = String::Flatten(source);
-  Handle<String> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, source->IsOneByteRepresentationUnderneath()
-                           ? URIEscape::Escape<uint8_t>(isolate, source)
-                           : URIEscape::Escape<uc16>(isolate, source));
-  return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_URIUnescape) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
-  Handle<String> source;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, source,
-                                     Object::ToString(isolate, input));
-  source = String::Flatten(source);
-  Handle<String> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, source->IsOneByteRepresentationUnderneath()
-                           ? URIUnescape::Unescape<uint8_t>(isolate, source)
-                           : URIUnescape::Unescape<uc16>(isolate, source));
-  return *result;
-}
-
-}  // namespace internal
-}  // namespace v8
diff --git a/src/runtime/runtime-utils.h b/src/runtime/runtime-utils.h
index 17c78d5..5cdf043 100644
--- a/src/runtime/runtime-utils.h
+++ b/src/runtime/runtime-utils.h
@@ -21,15 +21,6 @@
     }                                              \
   } while (0)
 
-#define RUNTIME_ASSERT_HANDLIFIED(value, T)        \
-  do {                                             \
-    if (!(value)) {                                \
-      V8_RuntimeError(__FILE__, __LINE__, #value); \
-      isolate->ThrowIllegalOperation();            \
-      return MaybeHandle<T>();                     \
-    }                                              \
-  } while (0)
-
 #else
 
 #define RUNTIME_ASSERT(value)                  \
@@ -39,14 +30,6 @@
     }                                          \
   } while (0)
 
-#define RUNTIME_ASSERT_HANDLIFIED(value, T) \
-  do {                                      \
-    if (!(value)) {                         \
-      isolate->ThrowIllegalOperation();     \
-      return MaybeHandle<T>();              \
-    }                                       \
-  } while (0)
-
 #endif
 
 // Cast the given object to a value of the specified type and store
@@ -69,7 +52,7 @@
 // and return.
 #define CONVERT_BOOLEAN_ARG_CHECKED(name, index) \
   RUNTIME_ASSERT(args[index]->IsBoolean());      \
-  bool name = args[index]->IsTrue();
+  bool name = args[index]->IsTrue(isolate);
 
 // Cast the given argument to a Smi and store its value in an int variable
 // with the given name.  If the argument is not a Smi call IllegalOperation
diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h
index 2c80280..e325d4f 100644
--- a/src/runtime/runtime.h
+++ b/src/runtime/runtime.h
@@ -39,10 +39,9 @@
   F(MoveArrayContents, 2, 1)         \
   F(EstimateNumberOfElements, 1, 1)  \
   F(GetArrayKeys, 2, 1)              \
-  F(ArrayConstructor, -1, 1)         \
   F(NewArray, -1 /* >= 3 */, 1)      \
-  F(InternalArrayConstructor, -1, 1) \
   F(ArrayPush, -1, 1)                \
+  F(FunctionBind, -1, 1)             \
   F(NormalizeElements, 1, 1)         \
   F(GrowArrayElements, 2, 1)         \
   F(HasComplexElements, 1, 1)        \
@@ -135,56 +134,64 @@
   F(DateCurrentTime, 0, 1)         \
   F(ThrowNotDateError, 0, 1)
 
-#define FOR_EACH_INTRINSIC_DEBUG(F)            \
-  F(HandleDebuggerStatement, 0, 1)             \
-  F(DebugBreak, 1, 1)                          \
-  F(DebugBreakOnBytecode, 1, 1)                \
-  F(SetDebugEventListener, 2, 1)               \
-  F(ScheduleBreak, 0, 1)                       \
-  F(DebugGetInternalProperties, 1, 1)          \
-  F(DebugGetPropertyDetails, 2, 1)             \
-  F(DebugGetProperty, 2, 1)                    \
-  F(DebugPropertyTypeFromDetails, 1, 1)        \
-  F(DebugPropertyAttributesFromDetails, 1, 1)  \
-  F(CheckExecutionState, 1, 1)                 \
-  F(GetFrameCount, 1, 1)                       \
-  F(GetFrameDetails, 2, 1)                     \
-  F(GetScopeCount, 2, 1)                       \
-  F(GetScopeDetails, 4, 1)                     \
-  F(GetAllScopesDetails, 4, 1)                 \
-  F(GetFunctionScopeCount, 1, 1)               \
-  F(GetFunctionScopeDetails, 2, 1)             \
-  F(SetScopeVariableValue, 6, 1)               \
-  F(DebugPrintScopes, 0, 1)                    \
-  F(SetBreakPointsActive, 1, 1)                \
-  F(GetBreakLocations, 2, 1)                   \
-  F(SetFunctionBreakPoint, 3, 1)               \
-  F(SetScriptBreakPoint, 4, 1)                 \
-  F(ClearBreakPoint, 1, 1)                     \
-  F(ChangeBreakOnException, 2, 1)              \
-  F(IsBreakOnException, 1, 1)                  \
-  F(PrepareStep, 2, 1)                         \
-  F(ClearStepping, 0, 1)                       \
-  F(DebugEvaluate, 6, 1)                       \
-  F(DebugEvaluateGlobal, 4, 1)                 \
-  F(DebugGetLoadedScripts, 0, 1)               \
-  F(DebugReferencedBy, 3, 1)                   \
-  F(DebugConstructedBy, 2, 1)                  \
-  F(DebugGetPrototype, 1, 1)                   \
-  F(DebugSetScriptSource, 2, 1)                \
-  F(FunctionGetInferredName, 1, 1)             \
-  F(FunctionGetDebugName, 1, 1)                \
-  F(GetFunctionCodePositionFromSource, 2, 1)   \
-  F(ExecuteInDebugContext, 1, 1)               \
-  F(GetDebugContext, 0, 1)                     \
-  F(CollectGarbage, 1, 1)                      \
-  F(GetHeapUsage, 0, 1)                        \
-  F(GetScript, 1, 1)                           \
-  F(DebugPrepareStepInIfStepping, 1, 1)        \
-  F(DebugPushPromise, 2, 1)                    \
-  F(DebugPopPromise, 0, 1)                     \
-  F(DebugAsyncTaskEvent, 1, 1)                 \
-  F(DebugIsActive, 0, 1)                       \
+#define FOR_EACH_INTRINSIC_DEBUG(F)             \
+  F(HandleDebuggerStatement, 0, 1)              \
+  F(DebugBreak, 1, 1)                           \
+  F(DebugBreakOnBytecode, 1, 1)                 \
+  F(SetDebugEventListener, 2, 1)                \
+  F(ScheduleBreak, 0, 1)                        \
+  F(DebugGetInternalProperties, 1, 1)           \
+  F(DebugGetPropertyDetails, 2, 1)              \
+  F(DebugGetProperty, 2, 1)                     \
+  F(DebugPropertyTypeFromDetails, 1, 1)         \
+  F(DebugPropertyAttributesFromDetails, 1, 1)   \
+  F(CheckExecutionState, 1, 1)                  \
+  F(GetFrameCount, 1, 1)                        \
+  F(GetFrameDetails, 2, 1)                      \
+  F(GetScopeCount, 2, 1)                        \
+  F(GetScopeDetails, 4, 1)                      \
+  F(GetAllScopesDetails, 4, 1)                  \
+  F(GetFunctionScopeCount, 1, 1)                \
+  F(GetFunctionScopeDetails, 2, 1)              \
+  F(SetScopeVariableValue, 6, 1)                \
+  F(DebugPrintScopes, 0, 1)                     \
+  F(SetBreakPointsActive, 1, 1)                 \
+  F(GetBreakLocations, 2, 1)                    \
+  F(SetFunctionBreakPoint, 3, 1)                \
+  F(SetScriptBreakPoint, 4, 1)                  \
+  F(ClearBreakPoint, 1, 1)                      \
+  F(ChangeBreakOnException, 2, 1)               \
+  F(IsBreakOnException, 1, 1)                   \
+  F(PrepareStep, 2, 1)                          \
+  F(ClearStepping, 0, 1)                        \
+  F(DebugEvaluate, 6, 1)                        \
+  F(DebugEvaluateGlobal, 4, 1)                  \
+  F(DebugGetLoadedScripts, 0, 1)                \
+  F(DebugReferencedBy, 3, 1)                    \
+  F(DebugConstructedBy, 2, 1)                   \
+  F(DebugGetPrototype, 1, 1)                    \
+  F(DebugSetScriptSource, 2, 1)                 \
+  F(FunctionGetInferredName, 1, 1)              \
+  F(FunctionGetDebugName, 1, 1)                 \
+  F(GetFunctionCodePositionFromSource, 2, 1)    \
+  F(ExecuteInDebugContext, 1, 1)                \
+  F(GetDebugContext, 0, 1)                      \
+  F(CollectGarbage, 1, 1)                       \
+  F(GetHeapUsage, 0, 1)                         \
+  F(GetScript, 1, 1)                            \
+  F(ScriptLineCount, 1, 1)                      \
+  F(ScriptLineStartPosition, 2, 1)              \
+  F(ScriptLineEndPosition, 2, 1)                \
+  F(ScriptLocationFromLine, 4, 1)               \
+  F(ScriptPositionInfo, 3, 1)                   \
+  F(ScriptSourceLine, 2, 1)                     \
+  F(DebugPrepareStepInIfStepping, 1, 1)         \
+  F(DebugPrepareStepInSuspendedGenerator, 0, 1) \
+  F(DebugRecordAsyncFunction, 1, 1)             \
+  F(DebugPushPromise, 2, 1)                     \
+  F(DebugPopPromise, 0, 1)                      \
+  F(DebugAsyncTaskEvent, 1, 1)                  \
+  F(DebugIsActive, 0, 1)                        \
   F(DebugBreakInOptimizedCode, 0, 1)
 
 #define FOR_EACH_INTRINSIC_FORIN(F) \
@@ -229,14 +236,10 @@
   F(GeneratorClose, 1, 1)               \
   F(GeneratorGetFunction, 1, 1)         \
   F(GeneratorGetReceiver, 1, 1)         \
-  F(GeneratorGetInput, 1, 1)            \
-  F(GeneratorSetContext, 1, 1)          \
+  F(GeneratorGetInputOrDebugPos, 1, 1)  \
   F(GeneratorGetContinuation, 1, 1)     \
-  F(GeneratorSetContinuation, 2, 1)     \
   F(GeneratorGetSourcePosition, 1, 1)   \
-  F(GeneratorGetResumeMode, 1, 1)       \
-  F(GeneratorLoadRegister, 2, 1)        \
-  F(GeneratorStoreRegister, 3, 1)
+  F(GeneratorGetResumeMode, 1, 1)
 
 #ifdef V8_I18N_SUPPORT
 #define FOR_EACH_INTRINSIC_I18N(F)           \
@@ -265,7 +268,8 @@
   F(BreakIteratorBreakType, 1, 1)            \
   F(StringToLowerCaseI18N, 1, 1)             \
   F(StringToUpperCaseI18N, 1, 1)             \
-  F(StringLocaleConvertCase, 3, 1)
+  F(StringLocaleConvertCase, 3, 1)           \
+  F(DateCacheVersion, 0, 1)
 #else
 #define FOR_EACH_INTRINSIC_I18N(F)
 #endif
@@ -296,6 +300,8 @@
   F(Interrupt, 0, 1)                                \
   F(AllocateInNewSpace, 1, 1)                       \
   F(AllocateInTargetSpace, 2, 1)                    \
+  F(AllocateSeqOneByteString, 1, 1)                 \
+  F(AllocateSeqTwoByteString, 1, 1)                 \
   F(CollectStackTrace, 2, 1)                        \
   F(MessageGetStartPosition, 1, 1)                  \
   F(MessageGetScript, 1, 1)                         \
@@ -320,14 +326,8 @@
   F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1)  \
   F(EnqueueMicrotask, 1, 1)                         \
   F(RunMicrotasks, 0, 1)                            \
-  F(WasmGetFunctionName, 2, 1)                      \
-  F(OrdinaryHasInstance, 2, 1)
-
-#define FOR_EACH_INTRINSIC_JSON(F) \
-  F(QuoteJSONString, 1, 1)         \
-  F(BasicJSONStringify, 1, 1)      \
-  F(ParseJson, 1, 1)
-
+  F(OrdinaryHasInstance, 2, 1)                      \
+  F(IsWasmObject, 1, 1)
 
 #define FOR_EACH_INTRINSIC_LITERALS(F) \
   F(CreateRegExpLiteral, 4, 1)         \
@@ -351,13 +351,8 @@
 
 
 #define FOR_EACH_INTRINSIC_MATHS(F) \
-  F(MathLogRT, 1, 1)                \
   F(DoubleHi, 1, 1)                 \
   F(DoubleLo, 1, 1)                 \
-  F(ConstructDouble, 2, 1)          \
-  F(RemPiO2, 2, 1)                  \
-  F(MathAtan2, 2, 1)                \
-  F(MathExpRT, 1, 1)                \
   F(MathPow, 2, 1)                  \
   F(MathPowRT, 2, 1)                \
   F(GenerateRandomNumbers, 1, 1)
@@ -374,7 +369,6 @@
   F(StringParseFloat, 1, 1)            \
   F(NumberToString, 1, 1)              \
   F(NumberToStringSkipCache, 1, 1)     \
-  F(NumberToIntegerMapMinusZero, 1, 1) \
   F(NumberToSmi, 1, 1)                 \
   F(SmiLexicographicCompare, 2, 1)     \
   F(MaxSmi, 0, 1)                      \
@@ -391,7 +385,6 @@
   F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
   F(GetProperty, 2, 1)                               \
   F(KeyedGetProperty, 2, 1)                          \
-  F(LoadGlobalViaContext, 1, 1)                      \
   F(StoreGlobalViaContext_Sloppy, 2, 1)              \
   F(StoreGlobalViaContext_Strict, 2, 1)              \
   F(AddNamedProperty, 4, 1)                          \
@@ -479,30 +472,31 @@
   F(RegExpExecReThrow, 4, 1)                   \
   F(IsRegExp, 1, 1)
 
-#define FOR_EACH_INTRINSIC_SCOPES(F)       \
-  F(ThrowConstAssignError, 0, 1)           \
-  F(DeclareGlobals, 2, 1)                  \
-  F(InitializeVarGlobal, 3, 1)             \
-  F(InitializeConstGlobal, 2, 1)           \
-  F(DeclareLookupSlot, 3, 1)               \
-  F(NewSloppyArguments_Generic, 1, 1)      \
-  F(NewStrictArguments, 1, 1)              \
-  F(NewRestParameter, 1, 1)                \
-  F(NewSloppyArguments, 3, 1)              \
-  F(NewClosure, 1, 1)                      \
-  F(NewClosure_Tenured, 1, 1)              \
-  F(NewScriptContext, 2, 1)                \
-  F(NewFunctionContext, 1, 1)              \
-  F(PushWithContext, 2, 1)                 \
-  F(PushCatchContext, 3, 1)                \
-  F(PushBlockContext, 2, 1)                \
-  F(IsJSModule, 1, 1)                      \
-  F(PushModuleContext, 2, 1)               \
-  F(DeclareModules, 1, 1)                  \
-  F(DeleteLookupSlot, 1, 1)                \
-  F(LoadLookupSlot, 1, 1)                  \
-  F(LoadLookupSlotInsideTypeof, 1, 1)      \
-  F(StoreLookupSlot_Sloppy, 2, 1)          \
+#define FOR_EACH_INTRINSIC_SCOPES(F)  \
+  F(ThrowConstAssignError, 0, 1)      \
+  F(DeclareGlobals, 2, 1)             \
+  F(InitializeVarGlobal, 3, 1)        \
+  F(InitializeConstGlobal, 2, 1)      \
+  F(DeclareEvalFunction, 2, 1)        \
+  F(DeclareEvalVar, 1, 1)             \
+  F(NewSloppyArguments_Generic, 1, 1) \
+  F(NewStrictArguments, 1, 1)         \
+  F(NewRestParameter, 1, 1)           \
+  F(NewSloppyArguments, 3, 1)         \
+  F(NewClosure, 1, 1)                 \
+  F(NewClosure_Tenured, 1, 1)         \
+  F(NewScriptContext, 2, 1)           \
+  F(NewFunctionContext, 1, 1)         \
+  F(PushWithContext, 2, 1)            \
+  F(PushCatchContext, 3, 1)           \
+  F(PushBlockContext, 2, 1)           \
+  F(IsJSModule, 1, 1)                 \
+  F(PushModuleContext, 2, 1)          \
+  F(DeclareModules, 1, 1)             \
+  F(DeleteLookupSlot, 1, 1)           \
+  F(LoadLookupSlot, 1, 1)             \
+  F(LoadLookupSlotInsideTypeof, 1, 1) \
+  F(StoreLookupSlot_Sloppy, 2, 1)     \
   F(StoreLookupSlot_Strict, 2, 1)
 
 #define FOR_EACH_INTRINSIC_SIMD(F)     \
@@ -832,9 +826,6 @@
   F(StringToArray, 2, 1)                  \
   F(StringToLowerCase, 1, 1)              \
   F(StringToUpperCase, 1, 1)              \
-  F(StringTrim, 3, 1)                     \
-  F(TruncateString, 2, 1)                 \
-  F(NewString, 2, 1)                      \
   F(StringLessThan, 2, 1)                 \
   F(StringLessThanOrEqual, 2, 1)          \
   F(StringGreaterThan, 2, 1)              \
@@ -843,12 +834,7 @@
   F(StringNotEqual, 2, 1)                 \
   F(FlattenString, 1, 1)                  \
   F(StringCharFromCode, 1, 1)             \
-  F(StringCharAt, 2, 1)                   \
   F(ExternalStringGetChar, 2, 1)          \
-  F(OneByteSeqStringGetChar, 2, 1)        \
-  F(OneByteSeqStringSetChar, 3, 1)        \
-  F(TwoByteSeqStringGetChar, 2, 1)        \
-  F(TwoByteSeqStringSetChar, 3, 1)        \
   F(StringCharCodeAt, 2, 1)
 
 #define FOR_EACH_INTRINSIC_SYMBOL(F) \
@@ -860,6 +846,7 @@
   F(SymbolIsPrivate, 1, 1)
 
 #define FOR_EACH_INTRINSIC_TEST(F)            \
+  F(ConstructDouble, 2, 1)                    \
   F(DeoptimizeFunction, 1, 1)                 \
   F(DeoptimizeNow, 0, 1)                      \
   F(RunningInSimulator, 0, 1)                 \
@@ -919,7 +906,6 @@
   F(ArrayBufferViewGetByteLength, 1, 1)      \
   F(ArrayBufferViewGetByteOffset, 1, 1)      \
   F(TypedArrayGetLength, 1, 1)               \
-  F(DataViewGetBuffer, 1, 1)                 \
   F(TypedArrayGetBuffer, 1, 1)               \
   F(TypedArraySetFastCases, 3, 1)            \
   F(TypedArrayMaxSizeInHeap, 0, 1)           \
@@ -945,10 +931,6 @@
   F(DataViewSetFloat64, 4, 1)
 
 
-#define FOR_EACH_INTRINSIC_URI(F) \
-  F(URIEscape, 1, 1)              \
-  F(URIUnescape, 1, 1)
-
 #define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
   F(LoadLookupSlotForCall, 1, 2)
 
@@ -969,6 +951,8 @@
   F(KeyedStoreIC_MissFromStubFailure, 5, 1)  \
   F(KeyedStoreIC_Slow, 5, 1)                 \
   F(LoadElementWithInterceptor, 2, 1)        \
+  F(LoadGlobalIC_Miss, 2, 1)                 \
+  F(LoadGlobalIC_Slow, 2, 1)                 \
   F(LoadIC_Miss, 4, 1)                       \
   F(LoadIC_MissFromStubFailure, 4, 1)        \
   F(LoadPropertyWithInterceptor, 3, 1)       \
@@ -981,7 +965,6 @@
   F(ToBooleanIC_Miss, 1, 1)                  \
   F(Unreachable, 0, 1)
 
-
 #define FOR_EACH_INTRINSIC_RETURN_OBJECT(F) \
   FOR_EACH_INTRINSIC_IC(F)                  \
   FOR_EACH_INTRINSIC_ARRAY(F)               \
@@ -998,7 +981,6 @@
   FOR_EACH_INTRINSIC_GENERATOR(F)           \
   FOR_EACH_INTRINSIC_I18N(F)                \
   FOR_EACH_INTRINSIC_INTERNAL(F)            \
-  FOR_EACH_INTRINSIC_JSON(F)                \
   FOR_EACH_INTRINSIC_LITERALS(F)            \
   FOR_EACH_INTRINSIC_LIVEEDIT(F)            \
   FOR_EACH_INTRINSIC_MATHS(F)               \
@@ -1012,8 +994,7 @@
   FOR_EACH_INTRINSIC_STRINGS(F)             \
   FOR_EACH_INTRINSIC_SYMBOL(F)              \
   FOR_EACH_INTRINSIC_TEST(F)                \
-  FOR_EACH_INTRINSIC_TYPEDARRAY(F)          \
-  FOR_EACH_INTRINSIC_URI(F)
+  FOR_EACH_INTRINSIC_TYPEDARRAY(F)
 
 // FOR_EACH_INTRINSIC defines the list of all intrinsics, coming in 2 flavors,
 // either returning an object or a pair.
@@ -1092,13 +1073,8 @@
       Handle<Object> value, LanguageMode language_mode);
 
   MUST_USE_RESULT static MaybeHandle<Object> GetObjectProperty(
-      Isolate* isolate, Handle<Object> object, Handle<Object> key);
-
-  MUST_USE_RESULT static MaybeHandle<Object> BasicJsonStringify(
-      Isolate* isolate, Handle<Object> object);
-
-  MUST_USE_RESULT static MaybeHandle<Object> BasicJsonStringifyString(
-      Isolate* isolate, Handle<String> string);
+      Isolate* isolate, Handle<Object> object, Handle<Object> key,
+      bool* is_found_out = nullptr);
 
   enum TypedArrayId {
     // arrayIds below should be synchronized with typedarray.js natives.
diff --git a/src/s390/OWNERS b/src/s390/OWNERS
index eb007cb..752e8e3 100644
--- a/src/s390/OWNERS
+++ b/src/s390/OWNERS
@@ -3,3 +3,4 @@
 joransiu@ca.ibm.com
 mbrandy@us.ibm.com
 michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/src/s390/assembler-s390.cc b/src/s390/assembler-s390.cc
index 9aa2aab..07ef6c0 100644
--- a/src/s390/assembler-s390.cc
+++ b/src/s390/assembler-s390.cc
@@ -228,31 +228,20 @@
       reinterpret_cast<intptr_t>(Assembler::target_address_at(pc_, host_)));
 }
 
-void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
-    ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
-  if (IsWasmMemoryReference(rmode_)) {
-    Address updated_memory_reference;
-    DCHECK(old_base <= wasm_memory_reference() &&
-           wasm_memory_reference() < old_base + old_size);
-    updated_memory_reference = new_base + (wasm_memory_reference() - old_base);
-    DCHECK(new_base <= updated_memory_reference &&
-           updated_memory_reference < new_base + new_size);
-    Assembler::set_target_address_at(
-        isolate_, pc_, host_, updated_memory_reference, icache_flush_mode);
-  } else if (IsWasmMemorySizeReference(rmode_)) {
-    uint32_t updated_size_reference;
-    DCHECK(wasm_memory_size_reference() <= old_size);
-    updated_size_reference =
-        new_size + (wasm_memory_size_reference() - old_size);
-    DCHECK(updated_size_reference <= new_size);
-    Assembler::set_target_address_at(
-        isolate_, pc_, host_, reinterpret_cast<Address>(updated_size_reference),
-        icache_flush_mode);
-  } else {
-    UNREACHABLE();
-  }
+Address RelocInfo::wasm_global_reference() {
+  DCHECK(IsWasmGlobalReference(rmode_));
+  return Assembler::target_address_at(pc_, host_);
+}
+
+void RelocInfo::unchecked_update_wasm_memory_reference(
+    Address address, ICacheFlushMode flush_mode) {
+  Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
+}
+
+void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
+                                                  ICacheFlushMode flush_mode) {
+  Assembler::set_target_address_at(isolate_, pc_, host_,
+                                   reinterpret_cast<Address>(size), flush_mode);
 }
 
 // -----------------------------------------------------------------------------
@@ -311,6 +300,8 @@
   desc->instr_size = pc_offset();
   desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
   desc->origin = this;
+  desc->unwinding_info_size = 0;
+  desc->unwinding_info = nullptr;
 }
 
 void Assembler::Align(int m) {
@@ -2504,7 +2495,6 @@
 
 void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode,
                      TypeFeedbackId ast_id) {
-  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
 
   int32_t target_index = emit_code_target(target, rmode, ast_id);
diff --git a/src/s390/assembler-s390.h b/src/s390/assembler-s390.h
index 28cdbb6..391a5d4 100644
--- a/src/s390/assembler-s390.h
+++ b/src/s390/assembler-s390.h
@@ -145,8 +145,6 @@
     return r;
   }
 
-  const char* ToString();
-  bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
   bool is(Register reg) const { return reg_code == reg.reg_code; }
   int code() const {
@@ -187,6 +185,8 @@
 const Register kRootRegister = r10;   // Roots array pointer.
 const Register cp = r13;              // JavaScript context pointer.
 
+static const bool kSimpleFPAliasing = true;
+
 // Double word FP register.
 struct DoubleRegister {
   enum Code {
@@ -200,8 +200,6 @@
   static const int kNumRegisters = Code::kAfterLast;
   static const int kMaxNumRegisters = kNumRegisters;
 
-  const char* ToString();
-  bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
   bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
 
@@ -548,7 +546,6 @@
 
   // Helper for unconditional branch to Label with update to save register
   void b(Register r, Label* l) {
-    positions_recorder()->WriteRecordedPositions();
     int32_t halfwords = branch_offset(l) / 2;
     brasl(r, Operand(halfwords));
   }
@@ -609,7 +606,7 @@
 
   void breakpoint(bool do_print) {
     if (do_print) {
-      printf("DebugBreak is inserted to %p\n", pc_);
+      PrintF("DebugBreak is inserted to %p\n", static_cast<void*>(pc_));
     }
 #if V8_HOST_ARCH_64_BIT
     int64_t value = reinterpret_cast<uint64_t>(&v8::base::OS::DebugBreak);
diff --git a/src/s390/builtins-s390.cc b/src/s390/builtins-s390.cc
index 8c2283f..f0059bc 100644
--- a/src/s390/builtins-s390.cc
+++ b/src/s390/builtins-s390.cc
@@ -15,8 +15,7 @@
 
 #define __ ACCESS_MASM(masm)
 
-void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
-                                BuiltinExtraArguments extra_args) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
   // ----------- S t a t e -------------
   //  -- r2                 : number of arguments excluding receiver
   //  -- r3                 : target
@@ -35,24 +34,8 @@
   __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
 
   // Insert extra arguments.
-  int num_extra_args = 0;
-  switch (extra_args) {
-    case BuiltinExtraArguments::kTarget:
-      __ Push(r3);
-      ++num_extra_args;
-      break;
-    case BuiltinExtraArguments::kNewTarget:
-      __ Push(r5);
-      ++num_extra_args;
-      break;
-    case BuiltinExtraArguments::kTargetAndNewTarget:
-      __ Push(r3, r5);
-      num_extra_args += 2;
-      break;
-    case BuiltinExtraArguments::kNone:
-      break;
-  }
-
+  const int num_extra_args = 2;
+  __ Push(r3, r5);
   // JumpToExternalReference expects r2 to contain the number of arguments
   // including the receiver and the extra arguments.
   __ AddP(r2, r2, Operand(num_extra_args + 1));
@@ -132,6 +115,8 @@
 void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
   // ----------- S t a t e -------------
   //  -- r2                 : number of arguments
+  //  -- r3                 : function
+  //  -- cp                 : context
   //  -- lr                 : return address
   //  -- sp[(argc - n) * 8] : arg[n] (zero-based)
   //  -- sp[(argc + 1) * 8] : receiver
@@ -143,59 +128,63 @@
   DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1;
 
   // Load the accumulator with the default return value (either -Infinity or
-  // +Infinity), with the tagged value in r3 and the double value in d1.
-  __ LoadRoot(r3, root_index);
-  __ LoadDouble(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
+  // +Infinity), with the tagged value in r7 and the double value in d1.
+  __ LoadRoot(r7, root_index);
+  __ LoadDouble(d1, FieldMemOperand(r7, HeapNumber::kValueOffset));
 
   // Setup state for loop
   // r4: address of arg[0] + kPointerSize
   // r5: number of slots to drop at exit (arguments + receiver)
-  __ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
-  __ AddP(r4, sp, r4);
-  __ AddP(r5, r2, Operand(1));
+  __ AddP(r6, r2, Operand(1));
 
   Label done_loop, loop;
   __ bind(&loop);
   {
     // Check if all parameters done.
-    __ CmpLogicalP(r4, sp);
-    __ ble(&done_loop);
+    __ SubP(r2, Operand(1));
+    __ blt(&done_loop);
 
     // Load the next parameter tagged value into r2.
-    __ lay(r4, MemOperand(r4, -kPointerSize));
-    __ LoadP(r2, MemOperand(r4));
+    __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
+    __ LoadP(r4, MemOperand(sp, r1));
 
     // Load the double value of the parameter into d2, maybe converting the
-    // parameter to a number first using the ToNumberStub if necessary.
+    // parameter to a number first using the ToNumber builtin if necessary.
     Label convert, convert_smi, convert_number, done_convert;
     __ bind(&convert);
-    __ JumpIfSmi(r2, &convert_smi);
-    __ LoadP(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
-    __ JumpIfRoot(r6, Heap::kHeapNumberMapRootIndex, &convert_number);
+    __ JumpIfSmi(r4, &convert_smi);
+    __ LoadP(r5, FieldMemOperand(r4, HeapObject::kMapOffset));
+    __ JumpIfRoot(r5, Heap::kHeapNumberMapRootIndex, &convert_number);
     {
-      // Parameter is not a Number, use the ToNumberStub to convert it.
-      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-      __ SmiTag(r5);
-      __ Push(r3, r4, r5);
-      ToNumberStub stub(masm->isolate());
-      __ CallStub(&stub);
-      __ Pop(r3, r4, r5);
-      __ SmiUntag(r5);
+      // Parameter is not a Number, use the ToNumber builtin to convert it.
+      DCHECK(!FLAG_enable_embedded_constant_pool);
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ PushStandardFrame(r3);
+      __ SmiTag(r2);
+      __ SmiTag(r6);
+      __ Push(r2, r6, r7);
+      __ LoadRR(r2, r4);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+      __ LoadRR(r4, r2);
+      __ Pop(r2, r6, r7);
       {
         // Restore the double accumulator value (d1).
         Label done_restore;
-        __ SmiToDouble(d1, r3);
-        __ JumpIfSmi(r3, &done_restore);
-        __ LoadDouble(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
+        __ SmiToDouble(d1, r7);
+        __ JumpIfSmi(r7, &done_restore);
+        __ LoadDouble(d1, FieldMemOperand(r7, HeapNumber::kValueOffset));
         __ bind(&done_restore);
       }
+      __ SmiUntag(r6);
+      __ SmiUntag(r2);
+      __ Pop(r14, fp, cp, r3);
     }
     __ b(&convert);
     __ bind(&convert_number);
-    __ LoadDouble(d2, FieldMemOperand(r2, HeapNumber::kValueOffset));
+    __ LoadDouble(d2, FieldMemOperand(r4, HeapNumber::kValueOffset));
     __ b(&done_convert);
     __ bind(&convert_smi);
-    __ SmiToDouble(d2, r2);
+    __ SmiToDouble(d2, r4);
     __ bind(&done_convert);
 
     // Perform the actual comparison with the accumulator value on the left hand
@@ -207,26 +196,26 @@
     __ b(CommuteCondition(cond_done), &compare_swap);
 
     // Left and right hand side are equal, check for -0 vs. +0.
-    __ TestDoubleIsMinusZero(reg, r6, r7);
+    __ TestDoubleIsMinusZero(reg, r1, r0);
     __ bne(&loop);
 
     // Update accumulator. Result is on the right hand side.
     __ bind(&compare_swap);
     __ ldr(d1, d2);
-    __ LoadRR(r3, r2);
+    __ LoadRR(r7, r4);
     __ b(&loop);
 
     // At least one side is NaN, which means that the result will be NaN too.
     // We still need to visit the rest of the arguments.
     __ bind(&compare_nan);
-    __ LoadRoot(r3, Heap::kNanValueRootIndex);
-    __ LoadDouble(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
+    __ LoadRoot(r7, Heap::kNanValueRootIndex);
+    __ LoadDouble(d1, FieldMemOperand(r7, HeapNumber::kValueOffset));
     __ b(&loop);
   }
 
   __ bind(&done_loop);
-  __ LoadRR(r2, r3);
-  __ Drop(r5);
+  __ LoadRR(r2, r7);
+  __ Drop(r6);
   __ Ret();
 }
 
@@ -254,8 +243,7 @@
   }
 
   // 2a. Convert the first argument to a number.
-  ToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
+  __ Jump(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
 
   // 2b. No arguments, return +0.
   __ bind(&no_arguments);
@@ -305,8 +293,7 @@
       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
       __ Push(r3, r5);
       __ LoadRR(r2, r4);
-      ToNumberStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
       __ LoadRR(r4, r2);
       __ Pop(r3, r5);
     }
@@ -699,8 +686,9 @@
   __ AssertGeneratorObject(r3);
 
   // Store input value into generator object.
-  __ StoreP(r2, FieldMemOperand(r3, JSGeneratorObject::kInputOffset), r0);
-  __ RecordWriteField(r3, JSGeneratorObject::kInputOffset, r2, r5,
+  __ StoreP(r2, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset),
+            r0);
+  __ RecordWriteField(r3, JSGeneratorObject::kInputOrDebugPosOffset, r2, r5,
                       kLRHasNotBeenSaved, kDontSaveFPRegs);
 
   // Store resume mode into generator object.
@@ -711,21 +699,26 @@
   __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
 
   // Flood function if we are stepping.
-  Label skip_flooding;
-  ExternalReference step_in_enabled =
-      ExternalReference::debug_step_in_enabled_address(masm->isolate());
-  __ mov(ip, Operand(step_in_enabled));
-  __ LoadlB(ip, MemOperand(ip));
-  __ CmpP(ip, Operand::Zero());
-  __ beq(&skip_flooding);
-  {
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ Push(r3, r4, r6);
-    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
-    __ Pop(r3, r4);
-    __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
-  }
-  __ bind(&skip_flooding);
+  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+  Label stepping_prepared;
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(masm->isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  __ mov(ip, Operand(last_step_action));
+  __ LoadB(ip, MemOperand(ip));
+  __ CmpP(ip, Operand(StepIn));
+  __ bge(&prepare_step_in_if_stepping);
+
+  // Flood function if we need to continue stepping in the suspended generator.
+
+  ExternalReference debug_suspended_generator =
+      ExternalReference::debug_suspended_generator_address(masm->isolate());
+
+  __ mov(ip, Operand(debug_suspended_generator));
+  __ LoadP(ip, MemOperand(ip));
+  __ CmpP(ip, r3);
+  __ beq(&prepare_step_in_suspended_generator);
+  __ bind(&stepping_prepared);
 
   // Push receiver.
   __ LoadP(ip, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
@@ -830,6 +823,26 @@
       __ Jump(r5);
     }
   }
+
+  __ bind(&prepare_step_in_if_stepping);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r3, r4, r6);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(r3, r4);
+    __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+  }
+  __ b(&stepping_prepared);
+
+  __ bind(&prepare_step_in_suspended_generator);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r3, r4);
+    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+    __ Pop(r3, r4);
+    __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+  }
+  __ b(&stepping_prepared);
 }
 
 void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
@@ -957,6 +970,21 @@
   Generate_JSEntryTrampolineHelper(masm, true);
 }
 
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
+  Register args_count = scratch;
+
+  // Get the arguments + receiver count.
+  __ LoadP(args_count,
+           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ LoadlW(args_count,
+            FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+  __ AddP(sp, sp, args_count);
+}
+
 // Generate code for entering a JS function with the interpreter.
 // On entry to the function the receiver and arguments have been pushed on the
 // stack left to right.  The actual argument count matches the formal parameter
@@ -1067,15 +1095,7 @@
   masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
 
   // The return value is in r2.
-
-  // Get the arguments + reciever count.
-  __ LoadP(r4, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ LoadlW(r4, FieldMemOperand(r4, BytecodeArray::kParameterSizeOffset));
-
-  // Leave the frame (also dropping the register file).
-  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
-
-  __ lay(sp, MemOperand(sp, r4));
+  LeaveInterpreterFrame(masm, r4);
   __ Ret();
 
   // If the bytecode array is no longer present, then the underlying function
@@ -1091,6 +1111,31 @@
   __ JumpToJSEntry(r6);
 }
 
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+  // Save the function and context for call to CompileBaseline.
+  __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+  __ LoadP(kContextRegister,
+           MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  // Leave the frame before recompiling for baseline so that we don't count as
+  // an activation on the stack.
+  LeaveInterpreterFrame(masm, r4);
+
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Push return value.
+    __ push(r2);
+
+    // Push function as argument and compile for baseline.
+    __ push(r3);
+    __ CallRuntime(Runtime::kCompileBaseline);
+
+    // Restore return value.
+    __ pop(r2);
+  }
+  __ Ret();
+}
+
 static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
                                          Register count, Register scratch) {
   Label loop;
@@ -1656,6 +1701,10 @@
 void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
                                                int field_index) {
   // ----------- S t a t e -------------
+  //  -- r2    : number of arguments
+  //  -- r3    : function
+  //  -- cp    : context
+
   //  -- lr    : return address
   //  -- sp[0] : receiver
   // -----------------------------------
@@ -1665,7 +1714,7 @@
   {
     __ Pop(r2);
     __ JumpIfSmi(r2, &receiver_not_date);
-    __ CompareObjectType(r2, r3, r4, JS_DATE_TYPE);
+    __ CompareObjectType(r2, r4, r5, JS_DATE_TYPE);
     __ bne(&receiver_not_date);
   }
 
@@ -1695,7 +1744,14 @@
 
   // 3. Raise a TypeError if the receiver is not a date.
   __ bind(&receiver_not_date);
-  __ TailCallRuntime(Runtime::kThrowNotDateError);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ push(r2);
+    __ PushStandardFrame(r3);
+    __ LoadSmiLiteral(r6, Smi::FromInt(0));
+    __ push(r6);
+    __ CallRuntime(Runtime::kThrowNotDateError);
+  }
 }
 
 // static
@@ -2661,6 +2717,76 @@
   __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
 }
 
+// static
+void Builtins::Generate_StringToNumber(MacroAssembler* masm) {
+  // The StringToNumber stub takes one argument in r2.
+  __ AssertString(r2);
+
+  // Check if string has a cached array index.
+  Label runtime;
+  __ LoadlW(r4, FieldMemOperand(r2, String::kHashFieldOffset));
+  __ And(r0, r4, Operand(String::kContainsCachedArrayIndexMask));
+  __ bne(&runtime);
+  __ IndexFromHash(r4, r2);
+  __ Ret();
+
+  __ bind(&runtime);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    // Push argument.
+    __ push(r2);
+    // We cannot use a tail call here because this builtin can also be called
+    // from wasm.
+    __ CallRuntime(Runtime::kStringToNumber);
+  }
+  __ Ret();
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in r2.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ TestIfSmi(r2);
+  __ Ret(eq);
+
+  __ CompareObjectType(r2, r3, r3, HEAP_NUMBER_TYPE);
+  // r2: receiver
+  // r3: receiver instance type
+  __ Ret(eq);
+
+  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+          RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_NonNumberToNumber(MacroAssembler* masm) {
+  // The NonNumberToNumber stub takes one argument in r2.
+  __ AssertNotNumber(r2);
+
+  __ CompareObjectType(r2, r3, r3, FIRST_NONSTRING_TYPE);
+  // r2: receiver
+  // r3: receiver instance type
+  __ Jump(masm->isolate()->builtins()->StringToNumber(), RelocInfo::CODE_TARGET,
+          lt);
+
+  Label not_oddball;
+  __ CmpP(r3, Operand(ODDBALL_TYPE));
+  __ bne(&not_oddball);
+  __ LoadP(r2, FieldMemOperand(r2, Oddball::kToNumberOffset));
+  __ Ret();
+  __ bind(&not_oddball);
+
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    // Push argument.
+    __ push(r2);
+    // We cannot use a tail call here because this builtin can also be called
+    // from wasm.
+    __ CallRuntime(Runtime::kToNumber);
+  }
+  __ Ret();
+}
+
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2 : actual number of arguments
diff --git a/src/s390/code-stubs-s390.cc b/src/s390/code-stubs-s390.cc
index e1e2003..6098c37 100644
--- a/src/s390/code-stubs-s390.cc
+++ b/src/s390/code-stubs-s390.cc
@@ -21,44 +21,15 @@
 namespace v8 {
 namespace internal {
 
-static void InitializeArrayConstructorDescriptor(
-    Isolate* isolate, CodeStubDescriptor* descriptor,
-    int constant_stack_parameter_count) {
-  Address deopt_handler =
-      Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+#define __ ACCESS_MASM(masm)
 
-  if (constant_stack_parameter_count == 0) {
-    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  } else {
-    descriptor->Initialize(r2, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  }
-}
-
-static void InitializeInternalArrayConstructorDescriptor(
-    Isolate* isolate, CodeStubDescriptor* descriptor,
-    int constant_stack_parameter_count) {
-  Address deopt_handler =
-      Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
-
-  if (constant_stack_parameter_count == 0) {
-    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  } else {
-    descriptor->Initialize(r2, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  }
-}
-
-void ArraySingleArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-void ArrayNArgumentsConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
+void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
+  __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
+  __ StoreP(r3, MemOperand(sp, r1));
+  __ push(r3);
+  __ push(r4);
+  __ AddP(r2, r2, Operand(3));
+  __ TailCallRuntime(Runtime::kNewArray);
 }
 
 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
@@ -66,18 +37,12 @@
   descriptor->Initialize(r2, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
 }
 
-void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+void FastFunctionBindStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
+  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
+  descriptor->Initialize(r2, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
 }
 
-void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-#define __ ACCESS_MASM(masm)
-
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                           Condition cond);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
@@ -956,7 +921,7 @@
   CEntryStub::GenerateAheadOfTime(isolate);
   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
-  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
   CreateWeakCellStub::GenerateAheadOfTime(isolate);
   BinaryOpICStub::GenerateAheadOfTime(isolate);
@@ -1070,7 +1035,6 @@
     // zLinux ABI requires caller's frame to have sufficient space for callee
     // preserved regsiter save area.
     // __ lay(sp, MemOperand(sp, -kCalleeRegisterSaveAreaSize));
-    __ positions_recorder()->WriteRecordedPositions();
     __ b(target);
     __ bind(&return_label);
     // __ la(sp, MemOperand(sp, +kCalleeRegisterSaveAreaSize));
@@ -1401,7 +1365,6 @@
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
                                           &miss,  // When index out of range.
-                                          STRING_INDEX_IS_ARRAY_INDEX,
                                           RECEIVER_IS_STRING);
   char_at_generator.GenerateFast(masm);
   __ Ret();
@@ -1864,12 +1827,15 @@
   // r4 : feedback vector
   // r5 : slot in feedback vector (Smi)
   Label initialize, done, miss, megamorphic, not_array_function;
+  Label done_initialize_count, done_increment_count;
 
   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
   DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
             masm->isolate()->heap()->uninitialized_symbol());
 
+  const int count_offset = FixedArray::kHeaderSize + kPointerSize;
+
   // Load the cache state into r7.
   __ SmiToPtrArrayOffset(r7, r5);
   __ AddP(r7, r4, r7);
@@ -1884,9 +1850,9 @@
   Register weak_value = r9;
   __ LoadP(weak_value, FieldMemOperand(r7, WeakCell::kValueOffset));
   __ CmpP(r3, weak_value);
-  __ beq(&done);
+  __ beq(&done_increment_count, Label::kNear);
   __ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
-  __ beq(&done);
+  __ beq(&done, Label::kNear);
   __ LoadP(feedback_map, FieldMemOperand(r7, HeapObject::kMapOffset));
   __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
   __ bne(&check_allocation_site);
@@ -1907,7 +1873,7 @@
   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
   __ CmpP(r3, r7);
   __ bne(&megamorphic);
-  __ b(&done);
+  __ b(&done_increment_count, Label::kNear);
 
   __ bind(&miss);
 
@@ -1937,12 +1903,31 @@
   // slot.
   CreateAllocationSiteStub create_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &create_stub);
-  __ b(&done);
+  __ b(&done_initialize_count, Label::kNear);
 
   __ bind(&not_array_function);
 
   CreateWeakCellStub weak_cell_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &weak_cell_stub);
+
+  __ bind(&done_initialize_count);
+  // Initialize the call counter.
+  __ LoadSmiLiteral(r7, Smi::FromInt(1));
+  __ SmiToPtrArrayOffset(r6, r5);
+  __ AddP(r6, r4, r6);
+  __ StoreP(r7, FieldMemOperand(r6, count_offset), r0);
+  __ b(&done, Label::kNear);
+
+  __ bind(&done_increment_count);
+
+  // Increment the call count for monomorphic function calls.
+  __ SmiToPtrArrayOffset(r7, r5);
+  __ AddP(r7, r4, r7);
+
+  __ LoadP(r6, FieldMemOperand(r7, count_offset));
+  __ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
+  __ StoreP(r6, FieldMemOperand(r7, count_offset), r0);
+
   __ bind(&done);
 }
 
@@ -2005,7 +1990,7 @@
   __ SmiToPtrArrayOffset(r7, r5);
   __ AddP(r4, r4, r7);
   __ LoadP(r5, FieldMemOperand(r4, count_offset));
-  __ AddSmiLiteral(r5, r5, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
+  __ AddSmiLiteral(r5, r5, Smi::FromInt(1), r0);
   __ StoreP(r5, FieldMemOperand(r4, count_offset), r0);
 
   __ LoadRR(r4, r6);
@@ -2052,7 +2037,7 @@
   // Increment the call count for monomorphic function calls.
   const int count_offset = FixedArray::kHeaderSize + kPointerSize;
   __ LoadP(r5, FieldMemOperand(r8, count_offset));
-  __ AddSmiLiteral(r5, r5, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
+  __ AddSmiLiteral(r5, r5, Smi::FromInt(1), r0);
   __ StoreP(r5, FieldMemOperand(r8, count_offset), r0);
 
   __ bind(&call_function);
@@ -2122,7 +2107,7 @@
   __ bne(&miss);
 
   // Initialize the call counter.
-  __ LoadSmiLiteral(r7, Smi::FromInt(CallICNexus::kCallCountIncrement));
+  __ LoadSmiLiteral(r7, Smi::FromInt(1));
   __ StoreP(r7, FieldMemOperand(r8, count_offset), r0);
 
   // Store the function. Use a stub since we need a frame for allocation.
@@ -2211,13 +2196,7 @@
     // index_ is consumed by runtime conversion function.
     __ Push(object_, index_);
   }
-  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
-    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
-  } else {
-    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
-    // NumberToSmi discards numbers that are not exact integers.
-    __ CallRuntime(Runtime::kNumberToSmi);
-  }
+  __ CallRuntime(Runtime::kNumberToSmi);
   // Save the conversion result before the pop instructions below
   // have a chance to overwrite it.
   __ Move(index_, r2);
@@ -2548,69 +2527,13 @@
   // r5: from index (untagged)
   __ SmiTag(r5, r5);
   StringCharAtGenerator generator(r2, r5, r4, r2, &runtime, &runtime, &runtime,
-                                  STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
+                                  RECEIVER_IS_STRING);
   generator.GenerateFast(masm);
   __ Drop(3);
   __ Ret();
   generator.SkipSlow(masm, &runtime);
 }
 
-void ToNumberStub::Generate(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in r2.
-  STATIC_ASSERT(kSmiTag == 0);
-  __ TestIfSmi(r2);
-  __ Ret(eq);
-
-  __ CompareObjectType(r2, r3, r3, HEAP_NUMBER_TYPE);
-  // r2: receiver
-  // r3: receiver instance type
-  Label not_heap_number;
-  __ bne(&not_heap_number);
-  __ Ret();
-  __ bind(&not_heap_number);
-
-  NonNumberToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-}
-
-void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
-  // The NonNumberToNumber stub takes one argument in r2.
-  __ AssertNotNumber(r2);
-
-  __ CompareObjectType(r2, r3, r3, FIRST_NONSTRING_TYPE);
-  // r2: receiver
-  // r3: receiver instance type
-  StringToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub, lt);
-
-  Label not_oddball;
-  __ CmpP(r3, Operand(ODDBALL_TYPE));
-  __ bne(&not_oddball, Label::kNear);
-  __ LoadP(r2, FieldMemOperand(r2, Oddball::kToNumberOffset));
-  __ b(r14);
-  __ bind(&not_oddball);
-
-  __ push(r2);  // Push argument.
-  __ TailCallRuntime(Runtime::kToNumber);
-}
-
-void StringToNumberStub::Generate(MacroAssembler* masm) {
-  // The StringToNumber stub takes one argument in r2.
-  __ AssertString(r2);
-
-  // Check if string has a cached array index.
-  Label runtime;
-  __ LoadlW(r4, FieldMemOperand(r2, String::kHashFieldOffset));
-  __ And(r0, r4, Operand(String::kContainsCachedArrayIndexMask));
-  __ bne(&runtime);
-  __ IndexFromHash(r4, r2);
-  __ Ret();
-
-  __ bind(&runtime);
-  __ push(r2);  // Push argument.
-  __ TailCallRuntime(Runtime::kStringToNumber);
-}
-
 void ToStringStub::Generate(MacroAssembler* masm) {
   // The ToString stub takes one argument in r2.
   Label done;
@@ -2788,7 +2711,7 @@
   // Load r4 with the allocation site.  We stick an undefined dummy value here
   // and replace it with the real allocation site later when we instantiate this
   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
-  __ Move(r4, handle(isolate()->heap()->undefined_value()));
+  __ Move(r4, isolate()->factory()->undefined_value());
 
   // Make sure that we actually patched the allocation site.
   if (FLAG_debug_code) {
@@ -3170,10 +3093,6 @@
 void DirectCEntryStub::Generate(MacroAssembler* masm) {
   __ CleanseP(r14);
 
-  // Statement positions are expected to be recorded when the target
-  // address is loaded.
-  __ positions_recorder()->WriteRecordedPositions();
-
   __ b(ip);  // Callee will return to R14 directly
 }
 
@@ -3627,13 +3546,13 @@
 
 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  LoadICStub stub(isolate(), state());
+  LoadICStub stub(isolate());
   stub.GenerateForTrampoline(masm);
 }
 
 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  KeyedLoadICStub stub(isolate(), state());
+  KeyedLoadICStub stub(isolate());
   stub.GenerateForTrampoline(masm);
 }
 
@@ -4273,17 +4192,11 @@
   }
 }
 
-void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
       isolate);
-  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
-      isolate);
-  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
-      isolate);
-}
-
-void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
-    Isolate* isolate) {
+  ArrayNArgumentsConstructorStub stub(isolate);
+  stub.GetCode();
   ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
   for (int i = 0; i < 2; i++) {
     // For internal arrays we only need a few things
@@ -4291,8 +4204,6 @@
     stubh1.GetCode();
     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
     stubh2.GetCode();
-    InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
-    stubh3.GetCode();
   }
 }
 
@@ -4310,13 +4221,15 @@
     CreateArrayDispatchOneArgument(masm, mode);
 
     __ bind(&not_one_case);
-    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+    ArrayNArgumentsConstructorStub stub(masm->isolate());
+    __ TailCallStub(&stub);
   } else if (argument_count() == NONE) {
     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   } else if (argument_count() == ONE) {
     CreateArrayDispatchOneArgument(masm, mode);
   } else if (argument_count() == MORE_THAN_ONE) {
-    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+    ArrayNArgumentsConstructorStub stub(masm->isolate());
+    __ TailCallStub(&stub);
   } else {
     UNREACHABLE();
   }
@@ -4398,7 +4311,7 @@
   InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
   __ TailCallStub(&stub0, lt);
 
-  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+  ArrayNArgumentsConstructorStub stubN(isolate());
   __ TailCallStub(&stubN, gt);
 
   if (IsFastPackedElementsKind(kind)) {
@@ -4617,13 +4530,13 @@
   // specified by the function's internal formal parameter count.
   Label rest_parameters;
   __ LoadP(r2, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ LoadP(r3, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r5, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
   __ LoadW(
-      r3, FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+      r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
 #if V8_TARGET_ARCH_S390X
-  __ SmiTag(r3);
+  __ SmiTag(r5);
 #endif
-  __ SubP(r2, r2, r3);
+  __ SubP(r2, r2, r5);
   __ bgt(&rest_parameters);
 
   // Return an empty rest parameter array.
@@ -4670,6 +4583,7 @@
     // ----------- S t a t e -------------
     //  -- cp : context
     //  -- r2 : number of rest parameters (tagged)
+    //  -- r3 : function
     //  -- r4 : pointer just past first rest parameters
     //  -- r8 : size of rest parameters
     //  -- lr : return address
@@ -4677,9 +4591,9 @@
 
     // Allocate space for the rest parameter array plus the backing store.
     Label allocate, done_allocate;
-    __ mov(r3, Operand(JSArray::kSize + FixedArray::kHeaderSize));
-    __ AddP(r3, r3, r8);
-    __ Allocate(r3, r5, r6, r7, &allocate, NO_ALLOCATION_FLAGS);
+    __ mov(r9, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+    __ AddP(r9, r9, r8);
+    __ Allocate(r9, r5, r6, r7, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Setup the elements array in r5.
@@ -4713,17 +4627,25 @@
     __ AddP(r2, r6, Operand(kHeapObjectTag));
     __ Ret();
 
-    // Fall back to %AllocateInNewSpace.
+    // Fall back to %AllocateInNewSpace (if not too big).
+    Label too_big_for_new_space;
     __ bind(&allocate);
+    __ CmpP(r9, Operand(Page::kMaxRegularHeapObjectSize));
+    __ bgt(&too_big_for_new_space);
     {
       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-      __ SmiTag(r3);
-      __ Push(r2, r4, r3);
+      __ SmiTag(r9);
+      __ Push(r2, r4, r9);
       __ CallRuntime(Runtime::kAllocateInNewSpace);
       __ LoadRR(r5, r2);
       __ Pop(r2, r4);
     }
     __ b(&done_allocate);
+
+    // Fall back to %NewRestParameter.
+    __ bind(&too_big_for_new_space);
+    __ push(r3);
+    __ TailCallRuntime(Runtime::kNewRestParameter);
   }
 }
 
@@ -5013,9 +4935,9 @@
   __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
   __ beq(&arguments_adaptor);
   {
-    __ LoadP(r3, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+    __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
     __ LoadW(r2, FieldMemOperand(
-                     r3, SharedFunctionInfo::kFormalParameterCountOffset));
+                     r6, SharedFunctionInfo::kFormalParameterCountOffset));
 #if V8_TARGET_ARCH_S390X
     __ SmiTag(r2);
 #endif
@@ -5035,6 +4957,7 @@
   // ----------- S t a t e -------------
   //  -- cp : context
   //  -- r2 : number of rest parameters (tagged)
+  //  -- r3 : function
   //  -- r4 : pointer just past first rest parameters
   //  -- r8 : size of rest parameters
   //  -- lr : return address
@@ -5042,9 +4965,9 @@
 
   // Allocate space for the strict arguments object plus the backing store.
   Label allocate, done_allocate;
-  __ mov(r3, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
-  __ AddP(r3, r3, r8);
-  __ Allocate(r3, r5, r6, r7, &allocate, NO_ALLOCATION_FLAGS);
+  __ mov(r9, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+  __ AddP(r9, r9, r8);
+  __ Allocate(r9, r5, r6, r7, &allocate, NO_ALLOCATION_FLAGS);
   __ bind(&done_allocate);
 
   // Setup the elements array in r5.
@@ -5079,47 +5002,25 @@
   __ AddP(r2, r6, Operand(kHeapObjectTag));
   __ Ret();
 
-  // Fall back to %AllocateInNewSpace.
+  // Fall back to %AllocateInNewSpace (if not too big).
+  Label too_big_for_new_space;
   __ bind(&allocate);
+  __ CmpP(r9, Operand(Page::kMaxRegularHeapObjectSize));
+  __ bgt(&too_big_for_new_space);
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ SmiTag(r3);
-    __ Push(r2, r4, r3);
+    __ SmiTag(r9);
+    __ Push(r2, r4, r9);
     __ CallRuntime(Runtime::kAllocateInNewSpace);
     __ LoadRR(r5, r2);
     __ Pop(r2, r4);
   }
   __ b(&done_allocate);
-}
 
-void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
-  Register context = cp;
-  Register result = r2;
-  Register slot = r4;
-
-  // Go up the context chain to the script context.
-  for (int i = 0; i < depth(); ++i) {
-    __ LoadP(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
-    context = result;
-  }
-
-  // Load the PropertyCell value at the specified slot.
-  __ ShiftLeftP(r0, slot, Operand(kPointerSizeLog2));
-  __ AddP(result, context, r0);
-  __ LoadP(result, ContextMemOperand(result));
-  __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
-
-  // If the result is not the_hole, return. Otherwise, handle in the runtime.
-  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
-  Label runtime;
-  __ beq(&runtime);
-  __ Ret();
-  __ bind(&runtime);
-
-  // Fallback to runtime.
-  __ SmiTag(slot);
-  __ Push(slot);
-  __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+  // Fall back to %NewStrictArguments.
+  __ bind(&too_big_for_new_space);
+  __ push(r3);
+  __ TailCallRuntime(Runtime::kNewStrictArguments);
 }
 
 void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
diff --git a/src/s390/codegen-s390.cc b/src/s390/codegen-s390.cc
index fe94c94..5728e45 100644
--- a/src/s390/codegen-s390.cc
+++ b/src/s390/codegen-s390.cc
@@ -15,56 +15,6 @@
 
 #define __ masm.
 
-#if defined(USE_SIMULATOR)
-byte* fast_exp_s390_machine_code = nullptr;
-double fast_exp_simulator(double x, Isolate* isolate) {
-  return Simulator::current(isolate)->CallFPReturnsDouble(
-      fast_exp_s390_machine_code, x, 0);
-}
-#endif
-
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
-  size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
-  if (buffer == nullptr) return nullptr;
-  ExternalReference::InitializeMathExpData();
-
-  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
-                      CodeObjectRequired::kNo);
-
-  {
-    DoubleRegister input = d0;
-    DoubleRegister result = d2;
-    DoubleRegister double_scratch1 = d3;
-    DoubleRegister double_scratch2 = d4;
-    Register temp1 = r6;
-    Register temp2 = r7;
-    Register temp3 = r8;
-
-    __ Push(temp3, temp2, temp1);
-    MathExpGenerator::EmitMathExp(&masm, input, result, double_scratch1,
-                                  double_scratch2, temp1, temp2, temp3);
-    __ Pop(temp3, temp2, temp1);
-    __ ldr(d0, result);
-    __ Ret();
-  }
-
-  CodeDesc desc;
-  masm.GetCode(&desc);
-  DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
-
-  Assembler::FlushICache(isolate, buffer, actual_size);
-  base::OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
-  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
-#else
-  fast_exp_s390_machine_code = buffer;
-  return &fast_exp_simulator;
-#endif
-}
-
 UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
 #if defined(USE_SIMULATOR)
   return nullptr;
@@ -507,95 +457,6 @@
   __ bind(&done);
 }
 
-static MemOperand ExpConstant(int index, Register base) {
-  return MemOperand(base, index * kDoubleSize);
-}
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm, DoubleRegister input,
-                                   DoubleRegister result,
-                                   DoubleRegister double_scratch1,
-                                   DoubleRegister double_scratch2,
-                                   Register temp1, Register temp2,
-                                   Register temp3) {
-  DCHECK(!input.is(result));
-  DCHECK(!input.is(double_scratch1));
-  DCHECK(!input.is(double_scratch2));
-  DCHECK(!result.is(double_scratch1));
-  DCHECK(!result.is(double_scratch2));
-  DCHECK(!double_scratch1.is(double_scratch2));
-  DCHECK(!temp1.is(temp2));
-  DCHECK(!temp1.is(temp3));
-  DCHECK(!temp2.is(temp3));
-  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
-  DCHECK(!masm->serializer_enabled());  // External references not serializable.
-
-  Label zero, infinity, done;
-
-  __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
-
-  __ LoadDouble(double_scratch1, ExpConstant(0, temp3));
-  __ cdbr(double_scratch1, input);
-  __ ldr(result, input);
-  __ bunordered(&done, Label::kNear);
-  __ bge(&zero, Label::kNear);
-
-  __ LoadDouble(double_scratch2, ExpConstant(1, temp3));
-  __ cdbr(input, double_scratch2);
-  __ bge(&infinity, Label::kNear);
-
-  __ LoadDouble(double_scratch1, ExpConstant(3, temp3));
-  __ LoadDouble(result, ExpConstant(4, temp3));
-
-  // Do not generate madbr, as intermediate result are not
-  // rounded properly
-  __ mdbr(double_scratch1, input);
-  __ adbr(double_scratch1, result);
-
-  // Move low word of double_scratch1 to temp2
-  __ lgdr(temp2, double_scratch1);
-  __ nihf(temp2, Operand::Zero());
-
-  __ sdbr(double_scratch1, result);
-  __ LoadDouble(result, ExpConstant(6, temp3));
-  __ LoadDouble(double_scratch2, ExpConstant(5, temp3));
-  __ mdbr(double_scratch1, double_scratch2);
-  __ sdbr(double_scratch1, input);
-  __ sdbr(result, double_scratch1);
-  __ ldr(double_scratch2, double_scratch1);
-  __ mdbr(double_scratch2, double_scratch2);
-  __ mdbr(result, double_scratch2);
-  __ LoadDouble(double_scratch2, ExpConstant(7, temp3));
-  __ mdbr(result, double_scratch2);
-  __ sdbr(result, double_scratch1);
-  __ LoadDouble(double_scratch2, ExpConstant(8, temp3));
-  __ adbr(result, double_scratch2);
-  __ ShiftRight(temp1, temp2, Operand(11));
-  __ AndP(temp2, Operand(0x7ff));
-  __ AddP(temp1, Operand(0x3ff));
-
-  // Must not call ExpConstant() after overwriting temp3!
-  __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
-  __ ShiftLeft(temp2, temp2, Operand(3));
-
-  __ lg(temp2, MemOperand(temp2, temp3));
-  __ sllg(temp1, temp1, Operand(52));
-  __ ogr(temp2, temp1);
-  __ ldgr(double_scratch1, temp2);
-
-  __ mdbr(result, double_scratch1);
-  __ b(&done, Label::kNear);
-
-  __ bind(&zero);
-  __ lzdr(kDoubleRegZero);
-  __ ldr(result, kDoubleRegZero);
-  __ b(&done, Label::kNear);
-
-  __ bind(&infinity);
-  __ LoadDouble(result, ExpConstant(2, temp3));
-
-  __ bind(&done);
-}
-
 #undef __
 
 CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
diff --git a/src/s390/codegen-s390.h b/src/s390/codegen-s390.h
index 18cf8e2..aaedb01 100644
--- a/src/s390/codegen-s390.h
+++ b/src/s390/codegen-s390.h
@@ -25,19 +25,6 @@
  private:
   DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
 };
-
-class MathExpGenerator : public AllStatic {
- public:
-  // Register input isn't modified. All other registers are clobbered.
-  static void EmitMathExp(MacroAssembler* masm, DoubleRegister input,
-                          DoubleRegister result, DoubleRegister double_scratch1,
-                          DoubleRegister double_scratch2, Register temp1,
-                          Register temp2, Register temp3);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/s390/deoptimizer-s390.cc b/src/s390/deoptimizer-s390.cc
index 44062d6..6ee8c74 100644
--- a/src/s390/deoptimizer-s390.cc
+++ b/src/s390/deoptimizer-s390.cc
@@ -116,8 +116,7 @@
 
   // Save all double registers before messing with them.
   __ lay(sp, MemOperand(sp, -kDoubleRegsSize));
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
     int code = config->GetAllocatableDoubleCode(i);
     const DoubleRegister dreg = DoubleRegister::from_code(code);
diff --git a/src/s390/disasm-s390.cc b/src/s390/disasm-s390.cc
index 5bab604..d9cf2d3 100644
--- a/src/s390/disasm-s390.cc
+++ b/src/s390/disasm-s390.cc
@@ -37,6 +37,8 @@
 namespace v8 {
 namespace internal {
 
+const auto GetRegConfig = RegisterConfiguration::Crankshaft;
+
 //------------------------------------------------------------------------------
 
 // Decoder decodes and disassembles instructions into an output buffer.
@@ -111,7 +113,7 @@
 
 // Print the double FP register name according to the active name converter.
 void Decoder::PrintDRegister(int reg) {
-  Print(DoubleRegister::from_code(reg).ToString());
+  Print(GetRegConfig()->GetDoubleRegisterName(reg));
 }
 
 // Print SoftwareInterrupt codes. Factoring this out reduces the complexity of
@@ -1357,7 +1359,7 @@
 namespace disasm {
 
 const char* NameConverter::NameOfAddress(byte* addr) const {
-  v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+  v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
   return tmp_buffer_.start();
 }
 
@@ -1366,7 +1368,7 @@
 }
 
 const char* NameConverter::NameOfCPURegister(int reg) const {
-  return v8::internal::Register::from_code(reg).ToString();
+  return v8::internal::GetRegConfig()->GetGeneralRegisterName(reg);
 }
 
 const char* NameConverter::NameOfByteCPURegister(int reg) const {
@@ -1411,7 +1413,7 @@
     buffer[0] = '\0';
     byte* prev_pc = pc;
     pc += d.InstructionDecode(buffer, pc);
-    v8::internal::PrintF(f, "%p    %08x      %s\n", prev_pc,
+    v8::internal::PrintF(f, "%p    %08x      %s\n", static_cast<void*>(prev_pc),
                          *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
   }
 }
diff --git a/src/s390/interface-descriptors-s390.cc b/src/s390/interface-descriptors-s390.cc
index aae1949..d588fbe 100644
--- a/src/s390/interface-descriptors-s390.cc
+++ b/src/s390/interface-descriptors-s390.cc
@@ -11,6 +11,15 @@
 
 const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
 
+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
+    CallInterfaceDescriptorData* data, int register_parameter_count) {
+  const Register default_stub_registers[] = {r2, r3, r4, r5, r6};
+  CHECK_LE(static_cast<size_t>(register_parameter_count),
+           arraysize(default_stub_registers));
+  data->InitializePlatformSpecific(register_parameter_count,
+                                   default_stub_registers);
+}
+
 const Register LoadDescriptor::ReceiverRegister() { return r3; }
 const Register LoadDescriptor::NameRegister() { return r4; }
 const Register LoadDescriptor::SlotRegister() { return r2; }
@@ -31,8 +40,6 @@
 
 const Register StoreTransitionDescriptor::MapRegister() { return r5; }
 
-const Register LoadGlobalViaContextDescriptor::SlotRegister() { return r4; }
-
 const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r4; }
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r2; }
 
@@ -51,9 +58,6 @@
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return r2; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return r5; }
 
-const Register HasPropertyDescriptor::ObjectRegister() { return r2; }
-const Register HasPropertyDescriptor::KeyRegister() { return r5; }
-
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {r4};
@@ -222,41 +226,24 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
   // r2 -- number of arguments
   // r3 -- function
   // r4 -- allocation site with elements kind
-  Register registers[] = {r3, r4};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
+  Register registers[] = {r3, r4, r2};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // stack param count needs (constructor pointer, and single argument)
   Register registers[] = {r3, r4, r2};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-void InternalArrayConstructorConstantArgCountDescriptor::
-    InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
-  // register state
-  // r2 -- number of arguments
-  // r3 -- constructor function
-  Register registers[] = {r3};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // stack param count needs (constructor pointer, and single argument)
-  Register registers[] = {r3, r2};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastArrayPushDescriptor::InitializePlatformSpecific(
+void VarArgFunctionDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // stack param count needs (arg count)
   Register registers[] = {r2};
diff --git a/src/s390/macro-assembler-s390.cc b/src/s390/macro-assembler-s390.cc
index 9257e64..ca48614 100644
--- a/src/s390/macro-assembler-s390.cc
+++ b/src/s390/macro-assembler-s390.cc
@@ -70,10 +70,6 @@
   Label start;
   bind(&start);
 
-  // Statement positions are expected to be recorded when the target
-  // address is loaded.
-  positions_recorder()->WriteRecordedPositions();
-
   // Branch to target via indirect branch
   basr(r14, target);
 
@@ -122,10 +118,6 @@
   bind(&start);
 #endif
 
-  // Statement positions are expected to be recorded when the target
-  // address is loaded.
-  positions_recorder()->WriteRecordedPositions();
-
   mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode));
   basr(r14, ip);
 
@@ -645,8 +637,7 @@
 
 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
   // General purpose registers are pushed last on the stack.
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
   return MemOperand(sp, doubles_size + register_offset);
@@ -987,9 +978,8 @@
 
 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
   LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  LoadP(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
-  LoadP(vector,
-        FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+  LoadP(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
+  LoadP(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
 }
 
 void MacroAssembler::EnterFrame(StackFrame::Type type,
@@ -1310,12 +1300,13 @@
                                              const ParameterCount& expected,
                                              const ParameterCount& actual) {
   Label skip_flooding;
-  ExternalReference step_in_enabled =
-      ExternalReference::debug_step_in_enabled_address(isolate());
-  mov(r6, Operand(step_in_enabled));
-  LoadlB(r6, MemOperand(r6));
-  CmpP(r6, Operand::Zero());
-  beq(&skip_flooding);
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  mov(r6, Operand(last_step_action));
+  LoadB(r6, MemOperand(r6));
+  CmpP(r6, Operand(StepIn));
+  blt(&skip_flooding);
   {
     FrameScope frame(this,
                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -3705,8 +3696,7 @@
   if (reg5.is_valid()) regs |= reg5.bit();
   if (reg6.is_valid()) regs |= reg6.bit();
 
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
     int code = config->GetAllocatableGeneralCode(i);
     Register candidate = Register::from_code(code);
@@ -4757,7 +4747,6 @@
 // Branch On Count.  Decrement R1, and branch if R1 != 0.
 void MacroAssembler::BranchOnCount(Register r1, Label* l) {
   int32_t offset = branch_offset(l);
-  positions_recorder()->WriteRecordedPositions();
   if (is_int16(offset)) {
 #if V8_TARGET_ARCH_S390X
     brctg(r1, Operand(offset));
diff --git a/src/s390/macro-assembler-s390.h b/src/s390/macro-assembler-s390.h
index 19f0f7c..23b353e 100644
--- a/src/s390/macro-assembler-s390.h
+++ b/src/s390/macro-assembler-s390.h
@@ -1582,17 +1582,29 @@
   }
 
   void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2,
-                          bool isSmi) {
+                          bool isSmi, bool keyMaybeNegative) {
     if (isSmi) {
       SmiToArrayOffset(dst, src, elementSizeLog2);
-    } else {
+    } else if (keyMaybeNegative ||
+          !CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
 #if V8_TARGET_ARCH_S390X
+      // If array access is dehoisted, the key, being an int32, can contain
+      // a negative value, as needs to be sign-extended to 64-bit for
+      // memory access.
+      //
       // src (key) is a 32-bit integer.  Sign extension ensures
       // upper 32-bit does not contain garbage before being used to
       // reference memory.
       lgfr(src, src);
 #endif
       ShiftLeftP(dst, src, Operand(elementSizeLog2));
+    } else {
+      // Small optimization to reduce pathlength.  After Bounds Check,
+      // the key is guaranteed to be non-negative.  Leverage RISBG,
+      // which also performs zero-extension.
+      risbg(dst, src, Operand(32 - elementSizeLog2),
+            Operand(63 - elementSizeLog2), Operand(elementSizeLog2),
+            true);
     }
   }
 
diff --git a/src/s390/simulator-s390.cc b/src/s390/simulator-s390.cc
index e819556..434fbff 100644
--- a/src/s390/simulator-s390.cc
+++ b/src/s390/simulator-s390.cc
@@ -23,6 +23,8 @@
 namespace v8 {
 namespace internal {
 
+const auto GetRegConfig = RegisterConfiguration::Crankshaft;
+
 // This macro provides a platform independent use of sscanf. The reason for
 // SScanF not being implemented in a platform independent way through
 // ::v8::internal::OS in the same way as SNPrintF is that the
@@ -331,7 +333,7 @@
             for (int i = 0; i < kNumRegisters; i++) {
               value = GetRegisterValue(i);
               PrintF("    %3s: %08" V8PRIxPTR,
-                     Register::from_code(i).ToString(), value);
+                     GetRegConfig()->GetGeneralRegisterName(i), value);
               if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
                   (i % 2) == 0) {
                 dvalue = GetRegisterPairDoubleValue(i);
@@ -346,7 +348,7 @@
             for (int i = 0; i < kNumRegisters; i++) {
               value = GetRegisterValue(i);
               PrintF("     %3s: %08" V8PRIxPTR " %11" V8PRIdPTR,
-                     Register::from_code(i).ToString(), value, value);
+                     GetRegConfig()->GetGeneralRegisterName(i), value, value);
               if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
                   (i % 2) == 0) {
                 dvalue = GetRegisterPairDoubleValue(i);
@@ -362,14 +364,15 @@
               float fvalue = GetFPFloatRegisterValue(i);
               uint32_t as_words = bit_cast<uint32_t>(fvalue);
               PrintF("%3s: %f 0x%08x\n",
-                     DoubleRegister::from_code(i).ToString(), fvalue, as_words);
+                     GetRegConfig()->GetDoubleRegisterName(i), fvalue,
+                     as_words);
             }
           } else if (strcmp(arg1, "alld") == 0) {
             for (int i = 0; i < DoubleRegister::kNumRegisters; i++) {
               dvalue = GetFPDoubleRegisterValue(i);
               uint64_t as_words = bit_cast<uint64_t>(dvalue);
               PrintF("%3s: %f 0x%08x %08x\n",
-                     DoubleRegister::from_code(i).ToString(), dvalue,
+                     GetRegConfig()->GetDoubleRegisterName(i), dvalue,
                      static_cast<uint32_t>(as_words >> 32),
                      static_cast<uint32_t>(as_words & 0xffffffff));
             }
@@ -701,7 +704,7 @@
   last_debugger_input_ = input;
 }
 
-void Simulator::FlushICache(v8::internal::HashMap* i_cache, void* start_addr,
+void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
                             size_t size) {
   intptr_t start = reinterpret_cast<intptr_t>(start_addr);
   int intra_line = (start & CachePage::kLineMask);
@@ -722,9 +725,8 @@
   }
 }
 
-CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
-  v8::internal::HashMap::Entry* entry =
-      i_cache->LookupOrInsert(page, ICacheHash(page));
+CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
+  base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
   if (entry->value == NULL) {
     CachePage* new_page = new CachePage();
     entry->value = new_page;
@@ -733,8 +735,7 @@
 }
 
 // Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
-                             int size) {
+void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
   DCHECK(size <= CachePage::kPageSize);
   DCHECK(AllOnOnePage(start, size - 1));
   DCHECK((start & CachePage::kLineMask) == 0);
@@ -746,8 +747,7 @@
   memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
 }
 
-void Simulator::CheckICache(v8::internal::HashMap* i_cache,
-                            Instruction* instr) {
+void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
   intptr_t address = reinterpret_cast<intptr_t>(instr);
   void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
   void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -1513,7 +1513,7 @@
 Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
   i_cache_ = isolate_->simulator_i_cache();
   if (i_cache_ == NULL) {
-    i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+    i_cache_ = new base::HashMap(&ICacheMatch);
     isolate_->set_simulator_i_cache(i_cache_);
   }
   Initialize(isolate);
@@ -1654,10 +1654,10 @@
 };
 
 // static
-void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
   Redirection::DeleteChain(first);
   if (i_cache != nullptr) {
-    for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
+    for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
          entry = i_cache->Next(entry)) {
       delete static_cast<CachePage*>(entry->value);
     }
@@ -2028,15 +2028,17 @@
             case ExternalReference::BUILTIN_FP_FP_CALL:
             case ExternalReference::BUILTIN_COMPARE_CALL:
               PrintF("Call to host function at %p with args %f, %f",
-                     FUNCTION_ADDR(generic_target), dval0, dval1);
+                     static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0,
+                     dval1);
               break;
             case ExternalReference::BUILTIN_FP_CALL:
               PrintF("Call to host function at %p with arg %f",
-                     FUNCTION_ADDR(generic_target), dval0);
+                     static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0);
               break;
             case ExternalReference::BUILTIN_FP_INT_CALL:
               PrintF("Call to host function at %p with args %f, %" V8PRIdPTR,
-                     FUNCTION_ADDR(generic_target), dval0, ival);
+                     static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0,
+                     ival);
               break;
             default:
               UNREACHABLE();
@@ -2178,8 +2180,8 @@
               "Call to host function at %p,\n"
               "\t\t\t\targs %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
               ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR,
-              FUNCTION_ADDR(target), arg[0], arg[1], arg[2], arg[3], arg[4],
-              arg[5]);
+              static_cast<void*>(FUNCTION_ADDR(target)), arg[0], arg[1], arg[2],
+              arg[3], arg[4], arg[5]);
           if (!stack_aligned) {
             PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
                    static_cast<intptr_t>(get_register(sp)));
@@ -5654,6 +5656,9 @@
 }
 
 void Simulator::CallInternal(byte* entry, int reg_arg_count) {
+  // Adjust JS-based stack limit to C-based stack limit.
+  isolate_->stack_guard()->AdjustStackLimitForSimulator();
+
   // Prepare to execute the code at entry
   if (ABI_USES_FUNCTION_DESCRIPTORS) {
     // entry is the function descriptor
@@ -5736,6 +5741,9 @@
 }
 
 intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
+  // Adjust JS-based stack limit to C-based stack limit.
+  isolate_->stack_guard()->AdjustStackLimitForSimulator();
+
   // Remember the values of non-volatile registers.
   int64_t r6_val = get_register(r6);
   int64_t r7_val = get_register(r7);
@@ -5948,11 +5956,52 @@
   uint8_t imm_val = AS(SIInstruction)->I2Value();          \
   int length = 4;
 
+#define DECODE_SIL_INSTRUCTION(b1, d1, i2)     \
+  int b1 = AS(SILInstruction)->B1Value();      \
+  intptr_t d1 = AS(SILInstruction)->D1Value(); \
+  int16_t i2 = AS(SILInstruction)->I2Value();  \
+  int length = 6;
+
+#define DECODE_SIY_INSTRUCTION(b1, d1, i2)     \
+  int b1 = AS(SIYInstruction)->B1Value();      \
+  intptr_t d1 = AS(SIYInstruction)->D1Value(); \
+  uint8_t i2 = AS(SIYInstruction)->I2Value();  \
+  int length = 6;
+
 #define DECODE_RRE_INSTRUCTION(r1, r2)    \
   int r1 = AS(RREInstruction)->R1Value(); \
   int r2 = AS(RREInstruction)->R2Value(); \
   int length = 4;
 
+#define DECODE_RRE_INSTRUCTION_M3(r1, r2, m3) \
+  int r1 = AS(RREInstruction)->R1Value();     \
+  int r2 = AS(RREInstruction)->R2Value();     \
+  int m3 = AS(RREInstruction)->M3Value();     \
+  int length = 4;
+
+#define DECODE_RRE_INSTRUCTION_NO_R2(r1)  \
+  int r1 = AS(RREInstruction)->R1Value(); \
+  int length = 4;
+
+#define DECODE_RRD_INSTRUCTION(r1, r2, r3) \
+  int r1 = AS(RRDInstruction)->R1Value();  \
+  int r2 = AS(RRDInstruction)->R2Value();  \
+  int r3 = AS(RRDInstruction)->R3Value();  \
+  int length = 4;
+
+#define DECODE_RRF_E_INSTRUCTION(r1, r2, m3, m4) \
+  int r1 = AS(RRFInstruction)->R1Value();        \
+  int r2 = AS(RRFInstruction)->R2Value();        \
+  int m3 = AS(RRFInstruction)->M3Value();        \
+  int m4 = AS(RRFInstruction)->M4Value();        \
+  int length = 4;
+
+#define DECODE_RRF_A_INSTRUCTION(r1, r2, r3) \
+  int r1 = AS(RRFInstruction)->R1Value();    \
+  int r2 = AS(RRFInstruction)->R2Value();    \
+  int r3 = AS(RRFInstruction)->R3Value();    \
+  int length = 4;
+
 #define DECODE_RR_INSTRUCTION(r1, r2)    \
   int r1 = AS(RRInstruction)->R1Value(); \
   int r2 = AS(RRInstruction)->R2Value(); \
@@ -5994,6 +6043,13 @@
   int16_t i2 = AS(RIInstruction)->I2Value();                           \
   int length = 4;
 
+#define DECODE_RXE_INSTRUCTION(r1, b2, x2, d2) \
+  int r1 = AS(RXEInstruction)->R1Value();      \
+  int b2 = AS(RXEInstruction)->B2Value();      \
+  int x2 = AS(RXEInstruction)->X2Value();      \
+  int d2 = AS(RXEInstruction)->D2Value();      \
+  int length = 6;
+
 #define GET_ADDRESS(index_reg, base_reg, offset)       \
   (((index_reg) == 0) ? 0 : get_register(index_reg)) + \
       (((base_reg) == 0) ? 0 : get_register(base_reg)) + offset
@@ -6334,11 +6390,23 @@
   return length;
 }
 
-EVALUATE(SPM) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SPM) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(BALR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(BALR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(BCTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(BCTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(BCR) {
   DCHECK_OPCODE(BCR);
@@ -6357,11 +6425,23 @@
   return length;
 }
 
-EVALUATE(SVC) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SVC) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(BSM) { return DecodeInstructionOriginal(instr); }
+EVALUATE(BSM) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(BASSM) { return DecodeInstructionOriginal(instr); }
+EVALUATE(BASSM) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(BASR) {
   DCHECK_OPCODE(BASR);
@@ -6382,11 +6462,23 @@
   return length;
 }
 
-EVALUATE(MVCL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MVCL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CLCL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CLCL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LPR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LPR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(LNR) {
   DCHECK_OPCODE(LNR);
@@ -6560,9 +6652,17 @@
   return length;
 }
 
-EVALUATE(CDR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CDR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LER) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LER) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(STH) {
   DCHECK_OPCODE(STH);
@@ -6598,7 +6698,11 @@
   return length;
 }
 
-EVALUATE(IC_z) { return DecodeInstructionOriginal(instr); }
+EVALUATE(IC_z) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(EX) {
   DCHECK_OPCODE(EX);
@@ -6622,11 +6726,23 @@
   return length;
 }
 
-EVALUATE(BAL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(BAL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(BCT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(BCT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(BC) { return DecodeInstructionOriginal(instr); }
+EVALUATE(BC) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(LH) {
   DCHECK_OPCODE(LH);
@@ -6642,7 +6758,11 @@
   return length;
 }
 
-EVALUATE(CH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(AH) {
   DCHECK_OPCODE(AH);
@@ -6695,13 +6815,29 @@
   return length;
 }
 
-EVALUATE(BAS) { return DecodeInstructionOriginal(instr); }
+EVALUATE(BAS) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CVD) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CVD) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CVB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CVB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LAE) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LAE) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(N) {
   DCHECK_OPCODE(N);
@@ -6808,13 +6944,29 @@
   return length;
 }
 
-EVALUATE(M) { return DecodeInstructionOriginal(instr); }
+EVALUATE(M) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(D) { return DecodeInstructionOriginal(instr); }
+EVALUATE(D) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(AL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(AL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(STD) {
   DCHECK_OPCODE(STD);
@@ -6838,7 +6990,11 @@
   return length;
 }
 
-EVALUATE(CD) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CD) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(STE) {
   DCHECK_OPCODE(STE);
@@ -6873,9 +7029,17 @@
   return length;
 }
 
-EVALUATE(BRXH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(BRXH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(BRXLE) { return DecodeInstructionOriginal(instr); }
+EVALUATE(BRXLE) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(BXH) {
   DCHECK_OPCODE(BXH);
@@ -6905,7 +7069,11 @@
   return length;
 }
 
-EVALUATE(BXLE) { return DecodeInstructionOriginal(instr); }
+EVALUATE(BXLE) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(SRL) {
   DCHECK_OPCODE(SRL);
@@ -7018,7 +7186,11 @@
   return length;
 }
 
-EVALUATE(SLDA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SLDA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(STM) {
   DCHECK_OPCODE(STM);
@@ -7061,11 +7233,23 @@
   return length;
 }
 
-EVALUATE(MVI) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MVI) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TS) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TS) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(NI) { return DecodeInstructionOriginal(instr); }
+EVALUATE(NI) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(CLI) {
   DCHECK_OPCODE(CLI);
@@ -7078,9 +7262,17 @@
   return length;
 }
 
-EVALUATE(OI) { return DecodeInstructionOriginal(instr); }
+EVALUATE(OI) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(XI) { return DecodeInstructionOriginal(instr); }
+EVALUATE(XI) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(LM) {
   DCHECK_OPCODE(LM);
@@ -7102,25 +7294,65 @@
   return length;
 }
 
-EVALUATE(MVCLE) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MVCLE) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CLCLE) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CLCLE) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MC) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MC) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CDS) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CDS) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STCM) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STCM) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ICM) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ICM) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(BPRP) { return DecodeInstructionOriginal(instr); }
+EVALUATE(BPRP) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(BPP) { return DecodeInstructionOriginal(instr); }
+EVALUATE(BPP) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TRTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TRTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MVN) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MVN) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(MVC) {
   DCHECK_OPCODE(MVC);
@@ -7143,73 +7375,209 @@
   return length;
 }
 
-EVALUATE(MVZ) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MVZ) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(NC) { return DecodeInstructionOriginal(instr); }
+EVALUATE(NC) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CLC) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CLC) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(OC) { return DecodeInstructionOriginal(instr); }
+EVALUATE(OC) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(XC) { return DecodeInstructionOriginal(instr); }
+EVALUATE(XC) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MVCP) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MVCP) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TRT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TRT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ED) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ED) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(EDMK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(EDMK) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(PKU) { return DecodeInstructionOriginal(instr); }
+EVALUATE(PKU) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(UNPKU) { return DecodeInstructionOriginal(instr); }
+EVALUATE(UNPKU) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MVCIN) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MVCIN) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(PKA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(PKA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(UNPKA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(UNPKA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(PLO) { return DecodeInstructionOriginal(instr); }
+EVALUATE(PLO) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LMD) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LMD) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SRP) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SRP) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MVO) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MVO) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(PACK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(PACK) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(UNPK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(UNPK) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ZAP) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ZAP) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(AP) { return DecodeInstructionOriginal(instr); }
+EVALUATE(AP) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SP) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SP) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MP) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MP) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(DP) { return DecodeInstructionOriginal(instr); }
+EVALUATE(DP) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(UPT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(UPT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(PFPO) { return DecodeInstructionOriginal(instr); }
+EVALUATE(PFPO) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(IIHH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(IIHH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(IIHL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(IIHL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(IILH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(IILH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(IILL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(IILL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(NIHH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(NIHH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(NIHL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(NIHL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(NILH) {
   DCHECK_OPCODE(NILH);
@@ -7233,9 +7601,17 @@
   return length;
 }
 
-EVALUATE(OIHH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(OIHH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(OIHL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(OIHL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(OILH) {
   DCHECK_OPCODE(OILH);
@@ -7258,15 +7634,35 @@
   return length;
 }
 
-EVALUATE(LLIHH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLIHH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LLIHL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLIHL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LLILH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLILH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LLILL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLILL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TMLH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TMLH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(TMLL) {
   DCHECK_OPCODE(TMLL);
@@ -7326,9 +7722,17 @@
   return length;
 }
 
-EVALUATE(TMHH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TMHH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TMHL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TMHL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(BRAS) {
   DCHECK_OPCODE(BRAS);
@@ -7437,7 +7841,11 @@
   return length;
 }
 
-EVALUATE(LGFI) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LGFI) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(BRASL) {
   DCHECK_OPCODE(BRASL);
@@ -7668,93 +8076,269 @@
   return length;
 }
 
-EVALUATE(LLHRL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLHRL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LGHRL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LGHRL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LHRL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LHRL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LLGHRL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLGHRL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STHRL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STHRL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LGRL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LGRL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STGRL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STGRL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LGFRL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LGFRL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LRL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LRL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LLGFRL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLGFRL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STRL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STRL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(EXRL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(EXRL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(PFDRL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(PFDRL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CGHRL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGHRL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CHRL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CHRL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CGRL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGRL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CGFRL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGFRL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ECTG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ECTG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CSST) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CSST) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LPD) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LPD) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LPDG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LPDG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(BRCTH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(BRCTH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(AIH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(AIH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ALSIH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ALSIH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ALSIHN) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ALSIHN) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CIH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CIH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STCK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STCK) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CFC) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CFC) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(IPM) { return DecodeInstructionOriginal(instr); }
+EVALUATE(IPM) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(HSCH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(HSCH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MSCH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MSCH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SSCH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SSCH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STSCH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STSCH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TSCH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TSCH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TPI) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TPI) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SAL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SAL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(RSCH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(RSCH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STCRW) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STCRW) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STCPS) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STCPS) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(RCHP) { return DecodeInstructionOriginal(instr); }
+EVALUATE(RCHP) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SCHM) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SCHM) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CKSM) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CKSM) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SAR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SAR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(EAR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(EAR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(MSR) {
   DCHECK_OPCODE(MSR);
@@ -7765,49 +8349,144 @@
   return length;
 }
 
-EVALUATE(MVST) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MVST) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CUSE) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CUSE) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SRST) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SRST) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(XSCH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(XSCH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STCKE) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STCKE) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STCKF) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STCKF) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SRNM) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SRNM) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STFPC) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STFPC) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LFPC) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LFPC) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TRE) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TRE) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CUUTF) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CUUTF) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CUTFU) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CUTFU) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STFLE) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STFLE) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SRNMB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SRNMB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SRNMT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SRNMT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LFAS) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LFAS) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(PPA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(PPA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ETND) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ETND) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TEND) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TEND) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(NIAI) { return DecodeInstructionOriginal(instr); }
+EVALUATE(NIAI) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TABORT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TABORT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TRAP4) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TRAP4) {
+  DCHECK_OPCODE(TRAP4);
+  int length = 4;
+  // whack the space of the caller allocated stack
+  int64_t sp_addr = get_register(sp);
+  for (int i = 0; i < kCalleeRegisterSaveAreaSize / kPointerSize; ++i) {
+    // we dont want to whack the RA (r14)
+    if (i != 14) (reinterpret_cast<intptr_t*>(sp_addr))[i] = 0xdeadbabe;
+  }
+  SoftwareInterrupt(instr);
+  return length;
+}
 
 EVALUATE(LPEBR) {
   DCHECK_OPCODE(LPEBR);
@@ -7827,7 +8506,11 @@
   return length;
 }
 
-EVALUATE(LNEBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LNEBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(LTEBR) {
   DCHECK_OPCODE(LTEBR);
@@ -7839,7 +8522,11 @@
   return length;
 }
 
-EVALUATE(LCEBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LCEBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(LDEBR) {
   DCHECK_OPCODE(LDEBR);
@@ -7850,13 +8537,29 @@
   return length;
 }
 
-EVALUATE(LXDBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LXDBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LXEBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LXEBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MXDBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MXDBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(KEBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(KEBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(CEBR) {
   DCHECK_OPCODE(CEBR);
@@ -7896,7 +8599,11 @@
   return length;
 }
 
-EVALUATE(MDEBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MDEBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(DEBR) {
   DCHECK_OPCODE(DEBR);
@@ -7910,9 +8617,17 @@
   return length;
 }
 
-EVALUATE(MAEBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MAEBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MSEBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MSEBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(LPDBR) {
   DCHECK_OPCODE(LPDBR);
@@ -7931,7 +8646,11 @@
   return length;
 }
 
-EVALUATE(LNDBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LNDBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 EVALUATE(LTDBR) {
   DCHECK_OPCODE(LTDBR);
@@ -7961,875 +8680,3890 @@
   return length;
 }
 
-EVALUATE(SQEBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SQEBR) {
+  DCHECK_OPCODE(SQEBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  float fr1_val = get_float32_from_d_register(r1);
+  float fr2_val = get_float32_from_d_register(r2);
+  fr1_val = std::sqrt(fr2_val);
+  set_d_register_from_float32(r1, fr1_val);
+  return length;
+}
 
-EVALUATE(SQDBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SQDBR) {
+  DCHECK_OPCODE(SQDBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  double r1_val = get_double_from_d_register(r1);
+  double r2_val = get_double_from_d_register(r2);
+  r1_val = std::sqrt(r2_val);
+  set_d_register_from_double(r1, r1_val);
+  return length;
+}
 
-EVALUATE(SQXBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SQXBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MEEBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MEEBR) {
+  DCHECK_OPCODE(MEEBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  float fr1_val = get_float32_from_d_register(r1);
+  float fr2_val = get_float32_from_d_register(r2);
+  fr1_val *= fr2_val;
+  set_d_register_from_float32(r1, fr1_val);
+  SetS390ConditionCode<float>(fr1_val, 0);
+  return length;
+}
 
-EVALUATE(KDBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(KDBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CDBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CDBR) {
+  DCHECK_OPCODE(CDBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  double r1_val = get_double_from_d_register(r1);
+  double r2_val = get_double_from_d_register(r2);
+  if (isNaN(r1_val) || isNaN(r2_val)) {
+    condition_reg_ = CC_OF;
+  } else {
+    SetS390ConditionCode<double>(r1_val, r2_val);
+  }
+  return length;
+}
 
-EVALUATE(ADBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ADBR) {
+  DCHECK_OPCODE(ADBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  double r1_val = get_double_from_d_register(r1);
+  double r2_val = get_double_from_d_register(r2);
+  r1_val += r2_val;
+  set_d_register_from_double(r1, r1_val);
+  SetS390ConditionCode<double>(r1_val, 0);
+  return length;
+}
 
-EVALUATE(SDBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SDBR) {
+  DCHECK_OPCODE(SDBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  double r1_val = get_double_from_d_register(r1);
+  double r2_val = get_double_from_d_register(r2);
+  r1_val -= r2_val;
+  set_d_register_from_double(r1, r1_val);
+  SetS390ConditionCode<double>(r1_val, 0);
+  return length;
+}
 
-EVALUATE(MDBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MDBR) {
+  DCHECK_OPCODE(MDBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  double r1_val = get_double_from_d_register(r1);
+  double r2_val = get_double_from_d_register(r2);
+  r1_val *= r2_val;
+  set_d_register_from_double(r1, r1_val);
+  SetS390ConditionCode<double>(r1_val, 0);
+  return length;
+}
 
-EVALUATE(DDBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(DDBR) {
+  DCHECK_OPCODE(DDBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  double r1_val = get_double_from_d_register(r1);
+  double r2_val = get_double_from_d_register(r2);
+  r1_val /= r2_val;
+  set_d_register_from_double(r1, r1_val);
+  SetS390ConditionCode<double>(r1_val, 0);
+  return length;
+}
 
-EVALUATE(MADBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MADBR) {
+  DCHECK_OPCODE(MADBR);
+  DECODE_RRD_INSTRUCTION(r1, r2, r3);
+  double r1_val = get_double_from_d_register(r1);
+  double r2_val = get_double_from_d_register(r2);
+  double r3_val = get_double_from_d_register(r3);
+  r1_val += r2_val * r3_val;
+  set_d_register_from_double(r1, r1_val);
+  SetS390ConditionCode<double>(r1_val, 0);
+  return length;
+}
 
-EVALUATE(MSDBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MSDBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LPXBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LPXBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LNXBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LNXBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LTXBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LTXBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LCXBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LCXBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LEDBRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LEDBRA) {
+  DCHECK_OPCODE(LEDBRA);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  double r2_val = get_double_from_d_register(r2);
+  set_d_register_from_float32(r1, static_cast<float>(r2_val));
+  return length;
+}
 
-EVALUATE(LDXBRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LDXBRA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LEXBRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LEXBRA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(FIXBRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(FIXBRA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(KXBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(KXBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CXBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CXBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(AXBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(AXBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SXBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SXBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MXBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MXBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(DXBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(DXBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TBEDR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TBEDR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TBDR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TBDR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(DIEBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(DIEBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(FIEBRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(FIEBRA) {
+  DCHECK_OPCODE(FIEBRA);
+  DECODE_RRF_E_INSTRUCTION(r1, r2, m3, m4);
+  float r2_val = get_float32_from_d_register(r2);
+  CHECK(m4 == 0);
+  switch (m3) {
+    case Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0:
+      set_d_register_from_float32(r1, round(r2_val));
+      break;
+    case Assembler::FIDBRA_ROUND_TOWARD_0:
+      set_d_register_from_float32(r1, trunc(r2_val));
+      break;
+    case Assembler::FIDBRA_ROUND_TOWARD_POS_INF:
+      set_d_register_from_float32(r1, std::ceil(r2_val));
+      break;
+    case Assembler::FIDBRA_ROUND_TOWARD_NEG_INF:
+      set_d_register_from_float32(r1, std::floor(r2_val));
+      break;
+    default:
+      UNIMPLEMENTED();
+      break;
+  }
+  return length;
+}
 
-EVALUATE(THDER) { return DecodeInstructionOriginal(instr); }
+EVALUATE(THDER) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(THDR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(THDR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(DIDBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(DIDBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(FIDBRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(FIDBRA) {
+  DCHECK_OPCODE(FIDBRA);
+  DECODE_RRF_E_INSTRUCTION(r1, r2, m3, m4);
+  double r2_val = get_double_from_d_register(r2);
+  CHECK(m4 == 0);
+  switch (m3) {
+    case Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0:
+      set_d_register_from_double(r1, round(r2_val));
+      break;
+    case Assembler::FIDBRA_ROUND_TOWARD_0:
+      set_d_register_from_double(r1, trunc(r2_val));
+      break;
+    case Assembler::FIDBRA_ROUND_TOWARD_POS_INF:
+      set_d_register_from_double(r1, std::ceil(r2_val));
+      break;
+    case Assembler::FIDBRA_ROUND_TOWARD_NEG_INF:
+      set_d_register_from_double(r1, std::floor(r2_val));
+      break;
+    default:
+      UNIMPLEMENTED();
+      break;
+  }
+  return length;
+}
 
-EVALUATE(LXR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LXR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LPDFR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LPDFR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LNDFR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LNDFR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LCDFR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LCDFR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LZER) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LZER) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LZDR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LZDR) {
+  DCHECK_OPCODE(LZDR);
+  DECODE_RRE_INSTRUCTION_NO_R2(r1);
+  set_d_register_from_double(r1, 0.0);
+  return length;
+}
 
-EVALUATE(LZXR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LZXR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SFPC) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SFPC) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SFASR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SFASR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(EFPC) { return DecodeInstructionOriginal(instr); }
+EVALUATE(EFPC) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CELFBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CELFBR) {
+  DCHECK_OPCODE(CELFBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  uint32_t r2_val = get_low_register<uint32_t>(r2);
+  float r1_val = static_cast<float>(r2_val);
+  set_d_register_from_float32(r1, r1_val);
+  return length;
+}
 
-EVALUATE(CDLFBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CDLFBR) {
+  DCHECK_OPCODE(CDLFBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  uint32_t r2_val = get_low_register<uint32_t>(r2);
+  double r1_val = static_cast<double>(r2_val);
+  set_d_register_from_double(r1, r1_val);
+  return length;
+}
 
-EVALUATE(CXLFBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CXLFBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CEFBRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CEFBRA) {
+  DCHECK_OPCODE(CEFBRA);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int32_t fr2_val = get_low_register<int32_t>(r2);
+  float fr1_val = static_cast<float>(fr2_val);
+  set_d_register_from_float32(r1, fr1_val);
+  return length;
+}
 
-EVALUATE(CDFBRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CDFBRA) {
+  DCHECK_OPCODE(CDFBRA);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  double r1_val = static_cast<double>(r2_val);
+  set_d_register_from_double(r1, r1_val);
+  return length;
+}
 
-EVALUATE(CXFBRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CXFBRA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CFEBRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CFEBRA) {
+  DCHECK_OPCODE(CFEBRA);
+  DECODE_RRE_INSTRUCTION_M3(r1, r2, mask_val);
+  float r2_fval = get_float32_from_d_register(r2);
+  int32_t r1_val = 0;
 
-EVALUATE(CFDBRA) { return DecodeInstructionOriginal(instr); }
+  SetS390RoundConditionCode(r2_fval, INT32_MAX, INT32_MIN);
 
-EVALUATE(CFXBRA) { return DecodeInstructionOriginal(instr); }
+  switch (mask_val) {
+    case CURRENT_ROUNDING_MODE:
+    case ROUND_TO_PREPARE_FOR_SHORTER_PRECISION: {
+      r1_val = static_cast<int32_t>(r2_fval);
+      break;
+    }
+    case ROUND_TO_NEAREST_WITH_TIES_AWAY_FROM_0: {
+      float ceil_val = std::ceil(r2_fval);
+      float floor_val = std::floor(r2_fval);
+      float sub_val1 = std::fabs(r2_fval - floor_val);
+      float sub_val2 = std::fabs(r2_fval - ceil_val);
+      if (sub_val1 > sub_val2) {
+        r1_val = static_cast<int32_t>(ceil_val);
+      } else if (sub_val1 < sub_val2) {
+        r1_val = static_cast<int32_t>(floor_val);
+      } else {  // round away from zero:
+        if (r2_fval > 0.0) {
+          r1_val = static_cast<int32_t>(ceil_val);
+        } else {
+          r1_val = static_cast<int32_t>(floor_val);
+        }
+      }
+      break;
+    }
+    case ROUND_TO_NEAREST_WITH_TIES_TO_EVEN: {
+      float ceil_val = std::ceil(r2_fval);
+      float floor_val = std::floor(r2_fval);
+      float sub_val1 = std::fabs(r2_fval - floor_val);
+      float sub_val2 = std::fabs(r2_fval - ceil_val);
+      if (sub_val1 > sub_val2) {
+        r1_val = static_cast<int32_t>(ceil_val);
+      } else if (sub_val1 < sub_val2) {
+        r1_val = static_cast<int32_t>(floor_val);
+      } else {  // check which one is even:
+        int32_t c_v = static_cast<int32_t>(ceil_val);
+        int32_t f_v = static_cast<int32_t>(floor_val);
+        if (f_v % 2 == 0)
+          r1_val = f_v;
+        else
+          r1_val = c_v;
+      }
+      break;
+    }
+    case ROUND_TOWARD_0: {
+      // check for overflow, cast r2_fval to 64bit integer
+      // then check value within the range of INT_MIN and INT_MAX
+      // and set condition code accordingly
+      int64_t temp = static_cast<int64_t>(r2_fval);
+      if (temp < INT_MIN || temp > INT_MAX) {
+        condition_reg_ = CC_OF;
+      }
+      r1_val = static_cast<int32_t>(r2_fval);
+      break;
+    }
+    case ROUND_TOWARD_PLUS_INFINITE: {
+      r1_val = static_cast<int32_t>(std::ceil(r2_fval));
+      break;
+    }
+    case ROUND_TOWARD_MINUS_INFINITE: {
+      // check for overflow, cast r2_fval to 64bit integer
+      // then check value within the range of INT_MIN and INT_MAX
+      // and set condition code accordingly
+      int64_t temp = static_cast<int64_t>(std::floor(r2_fval));
+      if (temp < INT_MIN || temp > INT_MAX) {
+        condition_reg_ = CC_OF;
+      }
+      r1_val = static_cast<int32_t>(std::floor(r2_fval));
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+  set_low_register(r1, r1_val);
+  return length;
+}
 
-EVALUATE(CLFEBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CFDBRA) {
+  DCHECK_OPCODE(CFDBRA);
+  DECODE_RRE_INSTRUCTION_M3(r1, r2, mask_val);
+  double r2_val = get_double_from_d_register(r2);
+  int32_t r1_val = 0;
 
-EVALUATE(CLFDBR) { return DecodeInstructionOriginal(instr); }
+  SetS390RoundConditionCode(r2_val, INT32_MAX, INT32_MIN);
 
-EVALUATE(CLFXBR) { return DecodeInstructionOriginal(instr); }
+  switch (mask_val) {
+    case CURRENT_ROUNDING_MODE:
+    case ROUND_TO_PREPARE_FOR_SHORTER_PRECISION: {
+      r1_val = static_cast<int32_t>(r2_val);
+      break;
+    }
+    case ROUND_TO_NEAREST_WITH_TIES_AWAY_FROM_0: {
+      double ceil_val = std::ceil(r2_val);
+      double floor_val = std::floor(r2_val);
+      double sub_val1 = std::fabs(r2_val - floor_val);
+      double sub_val2 = std::fabs(r2_val - ceil_val);
+      if (sub_val1 > sub_val2) {
+        r1_val = static_cast<int32_t>(ceil_val);
+      } else if (sub_val1 < sub_val2) {
+        r1_val = static_cast<int32_t>(floor_val);
+      } else {  // round away from zero:
+        if (r2_val > 0.0) {
+          r1_val = static_cast<int32_t>(ceil_val);
+        } else {
+          r1_val = static_cast<int32_t>(floor_val);
+        }
+      }
+      break;
+    }
+    case ROUND_TO_NEAREST_WITH_TIES_TO_EVEN: {
+      double ceil_val = std::ceil(r2_val);
+      double floor_val = std::floor(r2_val);
+      double sub_val1 = std::fabs(r2_val - floor_val);
+      double sub_val2 = std::fabs(r2_val - ceil_val);
+      if (sub_val1 > sub_val2) {
+        r1_val = static_cast<int32_t>(ceil_val);
+      } else if (sub_val1 < sub_val2) {
+        r1_val = static_cast<int32_t>(floor_val);
+      } else {  // check which one is even:
+        int32_t c_v = static_cast<int32_t>(ceil_val);
+        int32_t f_v = static_cast<int32_t>(floor_val);
+        if (f_v % 2 == 0)
+          r1_val = f_v;
+        else
+          r1_val = c_v;
+      }
+      break;
+    }
+    case ROUND_TOWARD_0: {
+      // check for overflow, cast r2_val to 64bit integer
+      // then check value within the range of INT_MIN and INT_MAX
+      // and set condition code accordingly
+      int64_t temp = static_cast<int64_t>(r2_val);
+      if (temp < INT_MIN || temp > INT_MAX) {
+        condition_reg_ = CC_OF;
+      }
+      r1_val = static_cast<int32_t>(r2_val);
+      break;
+    }
+    case ROUND_TOWARD_PLUS_INFINITE: {
+      r1_val = static_cast<int32_t>(std::ceil(r2_val));
+      break;
+    }
+    case ROUND_TOWARD_MINUS_INFINITE: {
+      // check for overflow, cast r2_val to 64bit integer
+      // then check value within the range of INT_MIN and INT_MAX
+      // and set condition code accordingly
+      int64_t temp = static_cast<int64_t>(std::floor(r2_val));
+      if (temp < INT_MIN || temp > INT_MAX) {
+        condition_reg_ = CC_OF;
+      }
+      r1_val = static_cast<int32_t>(std::floor(r2_val));
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+  set_low_register(r1, r1_val);
+  return length;
+}
 
-EVALUATE(CELGBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CFXBRA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CDLGBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CLFEBR) {
+  DCHECK_OPCODE(CLFEBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  float r2_val = get_float32_from_d_register(r2);
+  uint32_t r1_val = static_cast<uint32_t>(r2_val);
+  set_low_register(r1, r1_val);
+  SetS390ConvertConditionCode<double>(r2_val, r1_val, UINT32_MAX);
+  return length;
+}
 
-EVALUATE(CXLGBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CLFDBR) {
+  DCHECK_OPCODE(CLFDBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  double r2_val = get_double_from_d_register(r2);
+  uint32_t r1_val = static_cast<uint32_t>(r2_val);
+  set_low_register(r1, r1_val);
+  SetS390ConvertConditionCode<double>(r2_val, r1_val, UINT32_MAX);
+  return length;
+}
 
-EVALUATE(CEGBRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CLFXBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CDGBRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CELGBR) {
+  DCHECK_OPCODE(CELGBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  uint64_t r2_val = get_register(r2);
+  float r1_val = static_cast<float>(r2_val);
+  set_d_register_from_float32(r1, r1_val);
+  return length;
+}
 
-EVALUATE(CXGBRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CDLGBR) {
+  DCHECK_OPCODE(CDLGBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  uint64_t r2_val = get_register(r2);
+  double r1_val = static_cast<double>(r2_val);
+  set_d_register_from_double(r1, r1_val);
+  return length;
+}
 
-EVALUATE(CGEBRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CXLGBR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CGDBRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CEGBRA) {
+  DCHECK_OPCODE(CEGBRA);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int64_t fr2_val = get_register(r2);
+  float fr1_val = static_cast<float>(fr2_val);
+  set_d_register_from_float32(r1, fr1_val);
+  return length;
+}
 
-EVALUATE(CGXBRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CDGBRA) {
+  DCHECK_OPCODE(CDGBRA);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int64_t r2_val = get_register(r2);
+  double r1_val = static_cast<double>(r2_val);
+  set_d_register_from_double(r1, r1_val);
+  return length;
+}
 
-EVALUATE(CLGEBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CXGBRA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CLGDBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGEBRA) {
+  DCHECK_OPCODE(CGEBRA);
+  DECODE_RRE_INSTRUCTION_M3(r1, r2, mask_val);
+  float r2_fval = get_float32_from_d_register(r2);
+  int64_t r1_val = 0;
 
-EVALUATE(CFER) { return DecodeInstructionOriginal(instr); }
+  SetS390RoundConditionCode(r2_fval, INT64_MAX, INT64_MIN);
 
-EVALUATE(CFDR) { return DecodeInstructionOriginal(instr); }
+  switch (mask_val) {
+    case CURRENT_ROUNDING_MODE:
+    case ROUND_TO_NEAREST_WITH_TIES_AWAY_FROM_0:
+    case ROUND_TO_PREPARE_FOR_SHORTER_PRECISION: {
+      UNIMPLEMENTED();
+      break;
+    }
+    case ROUND_TO_NEAREST_WITH_TIES_TO_EVEN: {
+      float ceil_val = std::ceil(r2_fval);
+      float floor_val = std::floor(r2_fval);
+      if (std::abs(r2_fval - floor_val) > std::abs(r2_fval - ceil_val)) {
+        r1_val = static_cast<int64_t>(ceil_val);
+      } else if (std::abs(r2_fval - floor_val) < std::abs(r2_fval - ceil_val)) {
+        r1_val = static_cast<int64_t>(floor_val);
+      } else {  // check which one is even:
+        int64_t c_v = static_cast<int64_t>(ceil_val);
+        int64_t f_v = static_cast<int64_t>(floor_val);
+        if (f_v % 2 == 0)
+          r1_val = f_v;
+        else
+          r1_val = c_v;
+      }
+      break;
+    }
+    case ROUND_TOWARD_0: {
+      r1_val = static_cast<int64_t>(r2_fval);
+      break;
+    }
+    case ROUND_TOWARD_PLUS_INFINITE: {
+      r1_val = static_cast<int64_t>(std::ceil(r2_fval));
+      break;
+    }
+    case ROUND_TOWARD_MINUS_INFINITE: {
+      r1_val = static_cast<int64_t>(std::floor(r2_fval));
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+  set_register(r1, r1_val);
+  return length;
+}
 
-EVALUATE(CFXR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGDBRA) {
+  DCHECK_OPCODE(CGDBRA);
+  DECODE_RRE_INSTRUCTION_M3(r1, r2, mask_val);
+  double r2_val = get_double_from_d_register(r2);
+  int64_t r1_val = 0;
 
-EVALUATE(LDGR) { return DecodeInstructionOriginal(instr); }
+  SetS390RoundConditionCode(r2_val, INT64_MAX, INT64_MIN);
 
-EVALUATE(CGER) { return DecodeInstructionOriginal(instr); }
+  switch (mask_val) {
+    case CURRENT_ROUNDING_MODE:
+    case ROUND_TO_NEAREST_WITH_TIES_AWAY_FROM_0:
+    case ROUND_TO_PREPARE_FOR_SHORTER_PRECISION: {
+      UNIMPLEMENTED();
+      break;
+    }
+    case ROUND_TO_NEAREST_WITH_TIES_TO_EVEN: {
+      double ceil_val = std::ceil(r2_val);
+      double floor_val = std::floor(r2_val);
+      if (std::abs(r2_val - floor_val) > std::abs(r2_val - ceil_val)) {
+        r1_val = static_cast<int64_t>(ceil_val);
+      } else if (std::abs(r2_val - floor_val) < std::abs(r2_val - ceil_val)) {
+        r1_val = static_cast<int64_t>(floor_val);
+      } else {  // check which one is even:
+        int64_t c_v = static_cast<int64_t>(ceil_val);
+        int64_t f_v = static_cast<int64_t>(floor_val);
+        if (f_v % 2 == 0)
+          r1_val = f_v;
+        else
+          r1_val = c_v;
+      }
+      break;
+    }
+    case ROUND_TOWARD_0: {
+      r1_val = static_cast<int64_t>(r2_val);
+      break;
+    }
+    case ROUND_TOWARD_PLUS_INFINITE: {
+      r1_val = static_cast<int64_t>(std::ceil(r2_val));
+      break;
+    }
+    case ROUND_TOWARD_MINUS_INFINITE: {
+      r1_val = static_cast<int64_t>(std::floor(r2_val));
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+  set_register(r1, r1_val);
+  return length;
+}
 
-EVALUATE(CGDR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGXBRA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CGXR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CLGEBR) {
+  DCHECK_OPCODE(CLGEBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  float r2_val = get_float32_from_d_register(r2);
+  uint64_t r1_val = static_cast<uint64_t>(r2_val);
+  set_register(r1, r1_val);
+  SetS390ConvertConditionCode<double>(r2_val, r1_val, UINT64_MAX);
+  return length;
+}
 
-EVALUATE(LGDR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CLGDBR) {
+  DCHECK_OPCODE(CLGDBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  double r2_val = get_double_from_d_register(r2);
+  uint64_t r1_val = static_cast<uint64_t>(r2_val);
+  set_register(r1, r1_val);
+  SetS390ConvertConditionCode<double>(r2_val, r1_val, UINT64_MAX);
+  return length;
+}
 
-EVALUATE(MDTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CFER) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MDTRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CFDR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(DDTRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CFXR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ADTRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LDGR) {
+  DCHECK_OPCODE(LDGR);
+  // Load FPR from GPR (L <- 64)
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  uint64_t int_val = get_register(r2);
+  // double double_val = bit_cast<double, uint64_t>(int_val);
+  // set_d_register_from_double(rreInst->R1Value(), double_val);
+  set_d_register(r1, int_val);
+  return length;
+}
 
-EVALUATE(SDTRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGER) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LDETR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGDR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LEDTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGXR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LTDTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LGDR) {
+  DCHECK_OPCODE(LGDR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  // Load GPR from FPR (64 <- L)
+  int64_t double_val = get_d_register(r2);
+  set_register(r1, double_val);
+  return length;
+}
 
-EVALUATE(FIDTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MDTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MXTRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MDTRA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(DXTRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(DDTRA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(AXTRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ADTRA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SXTRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SDTRA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LXDTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LDETR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LDXTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LEDTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LTXTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LTDTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(FIXTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(FIDTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(KDTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MXTRA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CGDTRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(DXTRA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CUDTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(AXTRA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CDTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SXTRA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(EEDTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LXDTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ESDTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LDXTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(KXTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LTXTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CGXTRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(FIXTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CUXTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(KDTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CSXTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGDTRA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CXTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CUDTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(EEXTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CDTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ESXTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(EEDTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CDGTRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ESDTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CDUTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(KXTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CDSTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGXTRA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CEDTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CUXTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(QADTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CSXTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(IEDTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CXTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(RRDTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(EEXTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CXGTRA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ESXTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CXUTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CDGTRA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CXSTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CDUTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CEXTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CDSTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(QAXTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CEDTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(IEXTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(QADTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(RRXTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(IEDTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LPGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(RRDTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LNGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CXGTRA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LTGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CXUTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LCGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CXSTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CEXTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ALGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(QAXTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SLGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(IEXTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MSGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(RRXTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(DSGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LPGR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LRVGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LNGR) {
+  DCHECK_OPCODE(LNGR);
+  // Load Negative (64)
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int64_t r2_val = get_register(r2);
+  r2_val = (r2_val >= 0) ? -r2_val : r2_val;  // If pos, then negate it.
+  set_register(r1, r2_val);
+  condition_reg_ = (r2_val == 0) ? CC_EQ : CC_LT;  // CC0 - result is zero
+  // CC1 - result is negative
+  return length;
+}
 
-EVALUATE(LPGFR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LTGR) {
+  DCHECK_OPCODE(LTGR);
+  // Load Register (64)
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int64_t r2_val = get_register(r2);
+  SetS390ConditionCode<int64_t>(r2_val, 0);
+  set_register(r1, get_register(r2));
+  return length;
+}
 
-EVALUATE(LNGFR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LCGR) {
+  DCHECK_OPCODE(LCGR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int64_t r2_val = get_register(r2);
+  r2_val = ~r2_val;
+  r2_val = r2_val + 1;
+  set_register(r1, r2_val);
+  SetS390ConditionCode<int64_t>(r2_val, 0);
+  // if the input is INT_MIN, loading its compliment would be overflowing
+  if (r2_val < 0 && (r2_val + 1) > 0) {
+    SetS390OverflowCode(true);
+  }
+  return length;
+}
 
-EVALUATE(LTGFR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SGR) {
+  DCHECK_OPCODE(SGR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int64_t r1_val = get_register(r1);
+  int64_t r2_val = get_register(r2);
+  bool isOF = false;
+  isOF = CheckOverflowForIntSub(r1_val, r2_val, int64_t);
+  r1_val -= r2_val;
+  SetS390ConditionCode<int64_t>(r1_val, 0);
+  SetS390OverflowCode(isOF);
+  set_register(r1, r1_val);
+  return length;
+}
 
-EVALUATE(LCGFR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ALGR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LLGFR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SLGR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LLGTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MSGR) {
+  DCHECK_OPCODE(MSGR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int64_t r1_val = get_register(r1);
+  int64_t r2_val = get_register(r2);
+  set_register(r1, r1_val * r2_val);
+  return length;
+}
 
-EVALUATE(AGFR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(DSGR) {
+  DCHECK_OPCODE(DSGR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
 
-EVALUATE(SGFR) { return DecodeInstructionOriginal(instr); }
+  DCHECK(r1 % 2 == 0);
 
-EVALUATE(ALGFR) { return DecodeInstructionOriginal(instr); }
+  int64_t dividend = get_register(r1 + 1);
+  int64_t divisor = get_register(r2);
+  set_register(r1, dividend % divisor);
+  set_register(r1 + 1, dividend / divisor);
+  return length;
+}
 
-EVALUATE(SLGFR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LRVGR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MSGFR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LPGFR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(DSGFR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LNGFR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(KMAC) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LTGFR) {
+  DCHECK_OPCODE(LTGFR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  // Load and Test Register (64 <- 32)  (Sign Extends 32-bit val)
+  // Load Register (64 <- 32)  (Sign Extends 32-bit val)
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  int64_t result = static_cast<int64_t>(r2_val);
+  set_register(r1, result);
+  SetS390ConditionCode<int64_t>(result, 0);
+  return length;
+}
 
-EVALUATE(LRVR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LCGFR) {
+  DCHECK_OPCODE(LCGFR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  // Load and Test Register (64 <- 32)  (Sign Extends 32-bit val)
+  // Load Register (64 <- 32)  (Sign Extends 32-bit val)
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  int64_t result = static_cast<int64_t>(r2_val);
+  set_register(r1, result);
+  return length;
+}
 
-EVALUATE(CGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLGFR) {
+  DCHECK_OPCODE(LLGFR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  uint64_t r2_finalval = (static_cast<uint64_t>(r2_val) & 0x00000000ffffffff);
+  set_register(r1, r2_finalval);
+  return length;
+}
 
-EVALUATE(CLGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLGTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(KMF) { return DecodeInstructionOriginal(instr); }
+EVALUATE(AGFR) {
+  DCHECK_OPCODE(AGFR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  // Add Register (64 <- 32)  (Sign Extends 32-bit val)
+  int64_t r1_val = get_register(r1);
+  int64_t r2_val = static_cast<int64_t>(get_low_register<int32_t>(r2));
+  bool isOF = CheckOverflowForIntAdd(r1_val, r2_val, int64_t);
+  r1_val += r2_val;
+  SetS390ConditionCode<int64_t>(r1_val, 0);
+  SetS390OverflowCode(isOF);
+  set_register(r1, r1_val);
+  return length;
+}
 
-EVALUATE(KMO) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SGFR) {
+  DCHECK_OPCODE(SGFR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  // Sub Reg (64 <- 32)
+  int64_t r1_val = get_register(r1);
+  int64_t r2_val = static_cast<int64_t>(get_low_register<int32_t>(r2));
+  bool isOF = false;
+  isOF = CheckOverflowForIntSub(r1_val, r2_val, int64_t);
+  r1_val -= r2_val;
+  SetS390ConditionCode<int64_t>(r1_val, 0);
+  SetS390OverflowCode(isOF);
+  set_register(r1, r1_val);
+  return length;
+}
 
-EVALUATE(PCC) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ALGFR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(KMCTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SLGFR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(KM) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MSGFR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(KMC) { return DecodeInstructionOriginal(instr); }
+EVALUATE(DSGFR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CGFR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(KMAC) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(KIMD) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LRVR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(KLMD) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGR) {
+  DCHECK_OPCODE(CGR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  // Compare (64)
+  int64_t r1_val = get_register(r1);
+  int64_t r2_val = get_register(r2);
+  SetS390ConditionCode<int64_t>(r1_val, r2_val);
+  return length;
+}
 
-EVALUATE(CFDTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CLGR) {
+  DCHECK_OPCODE(CLGR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  // Compare Logical (64)
+  uint64_t r1_val = static_cast<uint64_t>(get_register(r1));
+  uint64_t r2_val = static_cast<uint64_t>(get_register(r2));
+  SetS390ConditionCode<uint64_t>(r1_val, r2_val);
+  return length;
+}
 
-EVALUATE(CLGDTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(KMF) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CLFDTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(KMO) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(BCTGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(PCC) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CFXTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(KMCTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CLFXTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(KM) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CDFTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(KMC) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CDLGTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGFR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CDLFTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(KIMD) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CXFTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(KLMD) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CXLGTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CFDTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CXLFTR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CLGDTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CGRT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CLFDTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(NGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(BCTGR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(OGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CFXTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(XGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CLFXTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(FLOGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CDFTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LLGCR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CDLGTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LLGHR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CDLFTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MLGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CXFTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(DLGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CXLGTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ALCGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CXLFTR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SLBGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGRT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(EPSW) { return DecodeInstructionOriginal(instr); }
+EVALUATE(NGR) {
+  DCHECK_OPCODE(NGR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int64_t r1_val = get_register(r1);
+  int64_t r2_val = get_register(r2);
+  r1_val &= r2_val;
+  SetS390BitWiseConditionCode<uint64_t>(r1_val);
+  set_register(r1, r1_val);
+  return length;
+}
 
-EVALUATE(TRTT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(OGR) {
+  DCHECK_OPCODE(OGR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int64_t r1_val = get_register(r1);
+  int64_t r2_val = get_register(r2);
+  r1_val |= r2_val;
+  SetS390BitWiseConditionCode<uint64_t>(r1_val);
+  set_register(r1, r1_val);
+  return length;
+}
 
-EVALUATE(TRTO) { return DecodeInstructionOriginal(instr); }
+EVALUATE(XGR) {
+  DCHECK_OPCODE(XGR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int64_t r1_val = get_register(r1);
+  int64_t r2_val = get_register(r2);
+  r1_val ^= r2_val;
+  SetS390BitWiseConditionCode<uint64_t>(r1_val);
+  set_register(r1, r1_val);
+  return length;
+}
 
-EVALUATE(TROT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(FLOGR) {
+  DCHECK_OPCODE(FLOGR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
 
-EVALUATE(TROO) { return DecodeInstructionOriginal(instr); }
+  DCHECK(r1 % 2 == 0);
 
-EVALUATE(LLCR) { return DecodeInstructionOriginal(instr); }
+  int64_t r2_val = get_register(r2);
 
-EVALUATE(LLHR) { return DecodeInstructionOriginal(instr); }
+  int i = 0;
+  for (; i < 64; i++) {
+    if (r2_val < 0) break;
+    r2_val <<= 1;
+  }
 
-EVALUATE(MLR) { return DecodeInstructionOriginal(instr); }
+  r2_val = get_register(r2);
 
-EVALUATE(DLR) { return DecodeInstructionOriginal(instr); }
+  int64_t mask = ~(1 << (63 - i));
+  set_register(r1, i);
+  set_register(r1 + 1, r2_val & mask);
+  return length;
+}
 
-EVALUATE(ALCR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLGCR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SLBR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLGHR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CU14) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MLGR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CU24) { return DecodeInstructionOriginal(instr); }
+EVALUATE(DLGR) {
+  DCHECK_OPCODE(DLGR);
+#ifdef V8_TARGET_ARCH_S390X
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  uint64_t r1_val = get_register(r1);
+  uint64_t r2_val = get_register(r2);
+  DCHECK(r1 % 2 == 0);
+  unsigned __int128 dividend = static_cast<unsigned __int128>(r1_val) << 64;
+  dividend += get_register(r1 + 1);
+  uint64_t remainder = dividend % r2_val;
+  uint64_t quotient = dividend / r2_val;
+  r1_val = remainder;
+  set_register(r1, remainder);
+  set_register(r1 + 1, quotient);
+  return length;
+#else
+  UNREACHABLE();
+#endif
+}
 
-EVALUATE(CU41) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ALCGR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CU42) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SLBGR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TRTRE) { return DecodeInstructionOriginal(instr); }
+EVALUATE(EPSW) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SRSTU) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TRTT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TRTE) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TRTO) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(AHHHR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TROT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SHHHR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TROO) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ALHHHR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLCR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SLHHHR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLHR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CHHR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MLR) {
+  DCHECK_OPCODE(MLR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  DCHECK(r1 % 2 == 0);
+
+  uint32_t r1_val = get_low_register<uint32_t>(r1 + 1);
+  uint32_t r2_val = get_low_register<uint32_t>(r2);
+  uint64_t product =
+      static_cast<uint64_t>(r1_val) * static_cast<uint64_t>(r2_val);
+  int32_t high_bits = product >> 32;
+  int32_t low_bits = product & 0x00000000FFFFFFFF;
+  set_low_register(r1, high_bits);
+  set_low_register(r1 + 1, low_bits);
+  return length;
+}
+
+EVALUATE(DLR) {
+  DCHECK_OPCODE(DLR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  uint32_t r1_val = get_low_register<uint32_t>(r1);
+  uint32_t r2_val = get_low_register<uint32_t>(r2);
+  DCHECK(r1 % 2 == 0);
+  uint64_t dividend = static_cast<uint64_t>(r1_val) << 32;
+  dividend += get_low_register<uint32_t>(r1 + 1);
+  uint32_t remainder = dividend % r2_val;
+  uint32_t quotient = dividend / r2_val;
+  r1_val = remainder;
+  set_low_register(r1, remainder);
+  set_low_register(r1 + 1, quotient);
+  return length;
+}
+
+EVALUATE(ALCR) {
+  DCHECK_OPCODE(ALCR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  uint32_t r1_val = get_low_register<uint32_t>(r1);
+  uint32_t r2_val = get_low_register<uint32_t>(r2);
+  uint32_t alu_out = 0;
+  bool isOF = false;
+
+  alu_out = r1_val + r2_val;
+  bool isOF_original = CheckOverflowForUIntAdd(r1_val, r2_val);
+  if (TestConditionCode((Condition)2) || TestConditionCode((Condition)3)) {
+    alu_out = alu_out + 1;
+    isOF = isOF_original || CheckOverflowForUIntAdd(alu_out, 1);
+  } else {
+    isOF = isOF_original;
+  }
+  set_low_register(r1, alu_out);
+  SetS390ConditionCodeCarry<uint32_t>(alu_out, isOF);
+  return length;
+}
+
+EVALUATE(SLBR) {
+  DCHECK_OPCODE(SLBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  uint32_t r1_val = get_low_register<uint32_t>(r1);
+  uint32_t r2_val = get_low_register<uint32_t>(r2);
+  uint32_t alu_out = 0;
+  bool isOF = false;
+
+  alu_out = r1_val - r2_val;
+  bool isOF_original = CheckOverflowForUIntSub(r1_val, r2_val);
+  if (TestConditionCode((Condition)2) || TestConditionCode((Condition)3)) {
+    alu_out = alu_out - 1;
+    isOF = isOF_original || CheckOverflowForUIntSub(alu_out, 1);
+  } else {
+    isOF = isOF_original;
+  }
+  set_low_register(r1, alu_out);
+  SetS390ConditionCodeCarry<uint32_t>(alu_out, isOF);
+  return length;
+}
+
+EVALUATE(CU14) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
+
+EVALUATE(CU24) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
+
+EVALUATE(CU41) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
+
+EVALUATE(CU42) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
+
+EVALUATE(TRTRE) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
+
+EVALUATE(SRSTU) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
+
+EVALUATE(TRTE) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
+
+EVALUATE(AHHHR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
+
+EVALUATE(SHHHR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
+
+EVALUATE(ALHHHR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
+
+EVALUATE(SLHHHR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
+
+EVALUATE(CHHR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
+
+EVALUATE(AHHLR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
+
+EVALUATE(SHHLR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
+
+EVALUATE(ALHHLR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
+
+EVALUATE(SLHHLR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
+
+EVALUATE(CHLR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
+
+EVALUATE(POPCNT_Z) {
+  DCHECK_OPCODE(POPCNT_Z);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int64_t r2_val = get_register(r2);
+  int64_t r1_val = 0;
+
+  uint8_t* r2_val_ptr = reinterpret_cast<uint8_t*>(&r2_val);
+  uint8_t* r1_val_ptr = reinterpret_cast<uint8_t*>(&r1_val);
+  for (int i = 0; i < 8; i++) {
+    uint32_t x = static_cast<uint32_t>(r2_val_ptr[i]);
+#if defined(__GNUC__)
+    r1_val_ptr[i] = __builtin_popcount(x);
+#else
+#error unsupport __builtin_popcount
+#endif
+  }
+  set_register(r1, static_cast<uint64_t>(r1_val));
+  return length;
+}
+
+EVALUATE(LOCGR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
+
+EVALUATE(NGRK) {
+  DCHECK_OPCODE(NGRK);
+  DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 64-bit Non-clobbering arithmetics / bitwise ops.
+  int64_t r2_val = get_register(r2);
+  int64_t r3_val = get_register(r3);
+  uint64_t bitwise_result = 0;
+  bitwise_result = r2_val & r3_val;
+  SetS390BitWiseConditionCode<uint64_t>(bitwise_result);
+  set_register(r1, bitwise_result);
+  return length;
+}
+
+EVALUATE(OGRK) {
+  DCHECK_OPCODE(OGRK);
+  DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 64-bit Non-clobbering arithmetics / bitwise ops.
+  int64_t r2_val = get_register(r2);
+  int64_t r3_val = get_register(r3);
+  uint64_t bitwise_result = 0;
+  bitwise_result = r2_val | r3_val;
+  SetS390BitWiseConditionCode<uint64_t>(bitwise_result);
+  set_register(r1, bitwise_result);
+  return length;
+}
+
+EVALUATE(XGRK) {
+  DCHECK_OPCODE(XGRK);
+  DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 64-bit Non-clobbering arithmetics / bitwise ops.
+  int64_t r2_val = get_register(r2);
+  int64_t r3_val = get_register(r3);
+  uint64_t bitwise_result = 0;
+  bitwise_result = r2_val ^ r3_val;
+  SetS390BitWiseConditionCode<uint64_t>(bitwise_result);
+  set_register(r1, bitwise_result);
+  return length;
+}
+
+EVALUATE(AGRK) {
+  DCHECK_OPCODE(AGRK);
+  DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 64-bit Non-clobbering arithmetics / bitwise ops.
+  int64_t r2_val = get_register(r2);
+  int64_t r3_val = get_register(r3);
+  bool isOF = CheckOverflowForIntAdd(r2_val, r3_val, int64_t);
+  SetS390ConditionCode<int64_t>(r2_val + r3_val, 0);
+  SetS390OverflowCode(isOF);
+  set_register(r1, r2_val + r3_val);
+  return length;
+}
+
+EVALUATE(SGRK) {
+  DCHECK_OPCODE(SGRK);
+  DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 64-bit Non-clobbering arithmetics / bitwise ops.
+  int64_t r2_val = get_register(r2);
+  int64_t r3_val = get_register(r3);
+  bool isOF = CheckOverflowForIntSub(r2_val, r3_val, int64_t);
+  SetS390ConditionCode<int64_t>(r2_val - r3_val, 0);
+  SetS390OverflowCode(isOF);
+  set_register(r1, r2_val - r3_val);
+  return length;
+}
+
+EVALUATE(ALGRK) {
+  DCHECK_OPCODE(ALGRK);
+  DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 64-bit Non-clobbering unsigned arithmetics
+  uint64_t r2_val = get_register(r2);
+  uint64_t r3_val = get_register(r3);
+  bool isOF = CheckOverflowForUIntAdd(r2_val, r3_val);
+  SetS390ConditionCode<uint64_t>(r2_val + r3_val, 0);
+  SetS390OverflowCode(isOF);
+  set_register(r1, r2_val + r3_val);
+  return length;
+}
+
+EVALUATE(SLGRK) {
+  DCHECK_OPCODE(SLGRK);
+  DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 64-bit Non-clobbering unsigned arithmetics
+  uint64_t r2_val = get_register(r2);
+  uint64_t r3_val = get_register(r3);
+  bool isOF = CheckOverflowForUIntSub(r2_val, r3_val);
+  SetS390ConditionCode<uint64_t>(r2_val - r3_val, 0);
+  SetS390OverflowCode(isOF);
+  set_register(r1, r2_val - r3_val);
+  return length;
+}
+
+EVALUATE(LOCR) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
+
+EVALUATE(NRK) {
+  DCHECK_OPCODE(NRK);
+  DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 32-bit Non-clobbering arithmetics / bitwise ops
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  int32_t r3_val = get_low_register<int32_t>(r3);
+  // Assume bitwise operation here
+  uint32_t bitwise_result = 0;
+  bitwise_result = r2_val & r3_val;
+  SetS390BitWiseConditionCode<uint32_t>(bitwise_result);
+  set_low_register(r1, bitwise_result);
+  return length;
+}
+
+EVALUATE(ORK) {
+  DCHECK_OPCODE(ORK);
+  DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 32-bit Non-clobbering arithmetics / bitwise ops
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  int32_t r3_val = get_low_register<int32_t>(r3);
+  // Assume bitwise operation here
+  uint32_t bitwise_result = 0;
+  bitwise_result = r2_val | r3_val;
+  SetS390BitWiseConditionCode<uint32_t>(bitwise_result);
+  set_low_register(r1, bitwise_result);
+  return length;
+}
+
+EVALUATE(XRK) {
+  DCHECK_OPCODE(XRK);
+  DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 32-bit Non-clobbering arithmetics / bitwise ops
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  int32_t r3_val = get_low_register<int32_t>(r3);
+  // Assume bitwise operation here
+  uint32_t bitwise_result = 0;
+  bitwise_result = r2_val ^ r3_val;
+  SetS390BitWiseConditionCode<uint32_t>(bitwise_result);
+  set_low_register(r1, bitwise_result);
+  return length;
+}
 
-EVALUATE(AHHLR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ARK) {
+  DCHECK_OPCODE(ARK);
+  DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 32-bit Non-clobbering arithmetics / bitwise ops
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  int32_t r3_val = get_low_register<int32_t>(r3);
+  bool isOF = CheckOverflowForIntAdd(r2_val, r3_val, int32_t);
+  SetS390ConditionCode<int32_t>(r2_val + r3_val, 0);
+  SetS390OverflowCode(isOF);
+  set_low_register(r1, r2_val + r3_val);
+  return length;
+}
 
-EVALUATE(SHHLR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SRK) {
+  DCHECK_OPCODE(SRK);
+  DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 32-bit Non-clobbering arithmetics / bitwise ops
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  int32_t r3_val = get_low_register<int32_t>(r3);
+  bool isOF = CheckOverflowForIntSub(r2_val, r3_val, int32_t);
+  SetS390ConditionCode<int32_t>(r2_val - r3_val, 0);
+  SetS390OverflowCode(isOF);
+  set_low_register(r1, r2_val - r3_val);
+  return length;
+}
 
-EVALUATE(ALHHLR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ALRK) {
+  DCHECK_OPCODE(ALRK);
+  DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 32-bit Non-clobbering unsigned arithmetics
+  uint32_t r2_val = get_low_register<uint32_t>(r2);
+  uint32_t r3_val = get_low_register<uint32_t>(r3);
+  bool isOF = CheckOverflowForUIntAdd(r2_val, r3_val);
+  SetS390ConditionCode<uint32_t>(r2_val + r3_val, 0);
+  SetS390OverflowCode(isOF);
+  set_low_register(r1, r2_val + r3_val);
+  return length;
+}
 
-EVALUATE(SLHHLR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SLRK) {
+  DCHECK_OPCODE(SLRK);
+  DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 32-bit Non-clobbering unsigned arithmetics
+  uint32_t r2_val = get_low_register<uint32_t>(r2);
+  uint32_t r3_val = get_low_register<uint32_t>(r3);
+  bool isOF = CheckOverflowForUIntSub(r2_val, r3_val);
+  SetS390ConditionCode<uint32_t>(r2_val - r3_val, 0);
+  SetS390OverflowCode(isOF);
+  set_low_register(r1, r2_val - r3_val);
+  return length;
+}
 
-EVALUATE(CHLR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LTG) {
+  DCHECK_OPCODE(LTG);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  intptr_t addr = x2_val + b2_val + d2;
+  int64_t value = ReadDW(addr);
+  set_register(r1, value);
+  SetS390ConditionCode<int64_t>(value, 0);
+  return length;
+}
 
-EVALUATE(POPCNT_Z) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CVBY) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LOCGR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(AG) {
+  DCHECK_OPCODE(AG);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t alu_out = get_register(r1);
+  int64_t mem_val = ReadDW(b2_val + x2_val + d2);
+  alu_out += mem_val;
+  SetS390ConditionCode<int32_t>(alu_out, 0);
+  set_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(NGRK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SG) {
+  DCHECK_OPCODE(SG);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t alu_out = get_register(r1);
+  int64_t mem_val = ReadDW(b2_val + x2_val + d2);
+  alu_out -= mem_val;
+  SetS390ConditionCode<int32_t>(alu_out, 0);
+  set_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(OGRK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ALG) {
+  DCHECK_OPCODE(ALG);
+#ifndef V8_TARGET_ARCH_S390X
+  DCHECK(false);
+#endif
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  uint64_t r1_val = get_register(r1);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t d2_val = d2;
+  uint64_t alu_out = r1_val;
+  uint64_t mem_val = static_cast<uint64_t>(ReadDW(b2_val + d2_val + x2_val));
+  alu_out += mem_val;
+  SetS390ConditionCode<uint64_t>(alu_out, 0);
+  set_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(XGRK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SLG) {
+  DCHECK_OPCODE(SLG);
+#ifndef V8_TARGET_ARCH_S390X
+  DCHECK(false);
+#endif
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  uint64_t r1_val = get_register(r1);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t d2_val = d2;
+  uint64_t alu_out = r1_val;
+  uint64_t mem_val = static_cast<uint64_t>(ReadDW(b2_val + d2_val + x2_val));
+  alu_out -= mem_val;
+  SetS390ConditionCode<uint64_t>(alu_out, 0);
+  set_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(AGRK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MSG) {
+  DCHECK_OPCODE(MSG);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t d2_val = d2;
+  int64_t mem_val = ReadDW(b2_val + d2_val + x2_val);
+  int64_t r1_val = get_register(r1);
+  set_register(r1, mem_val * r1_val);
+  return length;
+}
 
-EVALUATE(SGRK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(DSG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ALGRK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CVBG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SLGRK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LRVG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LOCR) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LT) {
+  DCHECK_OPCODE(LT);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  intptr_t addr = x2_val + b2_val + d2;
+  int32_t value = ReadW(addr, instr);
+  set_low_register(r1, value);
+  SetS390ConditionCode<int32_t>(value, 0);
+  return length;
+}
 
-EVALUATE(NRK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LGH) {
+  DCHECK_OPCODE(LGH);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  // Miscellaneous Loads and Stores
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  intptr_t addr = x2_val + b2_val + d2;
+  int64_t mem_val = static_cast<int64_t>(ReadH(addr, instr));
+  set_register(r1, mem_val);
+  return length;
+}
 
-EVALUATE(ORK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLGF) {
+  DCHECK_OPCODE(LLGF);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  // Miscellaneous Loads and Stores
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  intptr_t addr = x2_val + b2_val + d2;
+  uint64_t mem_val = static_cast<uint64_t>(ReadWU(addr, instr));
+  set_register(r1, mem_val);
+  return length;
+}
 
-EVALUATE(XRK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLGT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ARK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(AGF) {
+  DCHECK_OPCODE(AGF);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  uint64_t r1_val = get_register(r1);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t d2_val = d2;
+  uint64_t alu_out = r1_val;
+  uint32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+  alu_out += mem_val;
+  SetS390ConditionCode<int64_t>(alu_out, 0);
+  set_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(SRK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SGF) {
+  DCHECK_OPCODE(SGF);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  uint64_t r1_val = get_register(r1);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t d2_val = d2;
+  uint64_t alu_out = r1_val;
+  uint32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+  alu_out -= mem_val;
+  SetS390ConditionCode<int64_t>(alu_out, 0);
+  set_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(ALRK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ALGF) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SLRK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SLGF) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LTG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MSGF) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CVBY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(DSGF) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(AG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LRV) {
+  DCHECK_OPCODE(LRV);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  intptr_t mem_addr = b2_val + x2_val + d2;
+  int32_t mem_val = ReadW(mem_addr, instr);
+  set_low_register(r1, ByteReverse(mem_val));
+  return length;
+}
 
-EVALUATE(SG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LRVH) {
+  DCHECK_OPCODE(LRVH);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  intptr_t mem_addr = b2_val + x2_val + d2;
+  int16_t mem_val = ReadH(mem_addr, instr);
+  int32_t result = ByteReverse(mem_val) & 0x0000ffff;
+  result |= r1_val & 0xffff0000;
+  set_low_register(r1, result);
+  return length;
+}
 
-EVALUATE(ALG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CG) {
+  DCHECK_OPCODE(CG);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t alu_out = get_register(r1);
+  int64_t mem_val = ReadDW(b2_val + x2_val + d2);
+  SetS390ConditionCode<int64_t>(alu_out, mem_val);
+  set_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(SLG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CLG) {
+  DCHECK_OPCODE(CLG);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t alu_out = get_register(r1);
+  int64_t mem_val = ReadDW(b2_val + x2_val + d2);
+  SetS390ConditionCode<uint64_t>(alu_out, mem_val);
+  set_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(MSG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(NTSTG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(DSG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CVDY) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CVBG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CVDG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LRVG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STRVG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGF) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LGH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CLGF) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LLGF) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LTGF) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LLGT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(AGF) { return DecodeInstructionOriginal(instr); }
+EVALUATE(PFD) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SGF) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STRV) {
+  DCHECK_OPCODE(STRV);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  intptr_t mem_addr = b2_val + x2_val + d2;
+  WriteW(mem_addr, ByteReverse(r1_val), instr);
+  return length;
+}
 
-EVALUATE(ALGF) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STRVH) {
+  DCHECK_OPCODE(STRVH);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  intptr_t mem_addr = b2_val + x2_val + d2;
+  int16_t result = static_cast<int16_t>(r1_val >> 16);
+  WriteH(mem_addr, ByteReverse(result), instr);
+  return length;
+}
 
-EVALUATE(SLGF) { return DecodeInstructionOriginal(instr); }
+EVALUATE(BCTG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MSGF) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MSY) {
+  DCHECK_OPCODE(MSY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t d2_val = d2;
+  int32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  set_low_register(r1, mem_val * r1_val);
+  return length;
+}
 
-EVALUATE(DSGF) { return DecodeInstructionOriginal(instr); }
+EVALUATE(NY) {
+  DCHECK_OPCODE(NY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int32_t alu_out = get_low_register<int32_t>(r1);
+  int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+  alu_out &= mem_val;
+  SetS390BitWiseConditionCode<uint32_t>(alu_out);
+  set_low_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(LRV) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CLY) {
+  DCHECK_OPCODE(CLY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  uint32_t alu_out = get_low_register<uint32_t>(r1);
+  uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+  SetS390ConditionCode<uint32_t>(alu_out, mem_val);
+  return length;
+}
 
-EVALUATE(LRVH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(OY) {
+  DCHECK_OPCODE(OY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int32_t alu_out = get_low_register<int32_t>(r1);
+  int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+  alu_out |= mem_val;
+  SetS390BitWiseConditionCode<uint32_t>(alu_out);
+  set_low_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(CG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(XY) {
+  DCHECK_OPCODE(XY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int32_t alu_out = get_low_register<int32_t>(r1);
+  int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+  alu_out ^= mem_val;
+  SetS390BitWiseConditionCode<uint32_t>(alu_out);
+  set_low_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(CLG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CY) {
+  DCHECK_OPCODE(CY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int32_t alu_out = get_low_register<int32_t>(r1);
+  int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+  SetS390ConditionCode<int32_t>(alu_out, mem_val);
+  return length;
+}
 
-EVALUATE(NTSTG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(AY) {
+  DCHECK_OPCODE(AY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int32_t alu_out = get_low_register<int32_t>(r1);
+  int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+  bool isOF = false;
+  isOF = CheckOverflowForIntAdd(alu_out, mem_val, int32_t);
+  alu_out += mem_val;
+  SetS390ConditionCode<int32_t>(alu_out, 0);
+  SetS390OverflowCode(isOF);
+  set_low_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(CVDY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SY) {
+  DCHECK_OPCODE(SY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int32_t alu_out = get_low_register<int32_t>(r1);
+  int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+  bool isOF = false;
+  isOF = CheckOverflowForIntSub(alu_out, mem_val, int32_t);
+  alu_out -= mem_val;
+  SetS390ConditionCode<int32_t>(alu_out, 0);
+  SetS390OverflowCode(isOF);
+  set_low_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(CVDG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MFY) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STRVG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ALY) {
+  DCHECK_OPCODE(ALY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  uint32_t alu_out = get_low_register<uint32_t>(r1);
+  uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+  alu_out += mem_val;
+  set_low_register(r1, alu_out);
+  SetS390ConditionCode<uint32_t>(alu_out, 0);
+  return length;
+}
 
-EVALUATE(CGF) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SLY) {
+  DCHECK_OPCODE(SLY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  uint32_t alu_out = get_low_register<uint32_t>(r1);
+  uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+  alu_out -= mem_val;
+  set_low_register(r1, alu_out);
+  SetS390ConditionCode<uint32_t>(alu_out, 0);
+  return length;
+}
 
-EVALUATE(CLGF) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STHY) {
+  DCHECK_OPCODE(STHY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  // Miscellaneous Loads and Stores
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  intptr_t addr = x2_val + b2_val + d2;
+  uint16_t value = get_low_register<uint32_t>(r1);
+  WriteH(addr, value, instr);
+  return length;
+}
 
-EVALUATE(LTGF) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LAY) {
+  DCHECK_OPCODE(LAY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  // Load Address
+  int rb = b2;
+  int rx = x2;
+  int offset = d2;
+  int64_t rb_val = (rb == 0) ? 0 : get_register(rb);
+  int64_t rx_val = (rx == 0) ? 0 : get_register(rx);
+  set_register(r1, rx_val + rb_val + offset);
+  return length;
+}
 
-EVALUATE(CGH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STCY) {
+  DCHECK_OPCODE(STCY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  // Miscellaneous Loads and Stores
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  intptr_t addr = x2_val + b2_val + d2;
+  uint8_t value = get_low_register<uint32_t>(r1);
+  WriteB(addr, value);
+  return length;
+}
 
-EVALUATE(PFD) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ICY) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STRV) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LAEY) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STRVH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LB) {
+  DCHECK_OPCODE(LB);
+  // Miscellaneous Loads and Stores
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  intptr_t addr = x2_val + b2_val + d2;
+  int32_t mem_val = ReadB(addr);
+  set_low_register(r1, mem_val);
+  return length;
+}
 
-EVALUATE(BCTG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LGB) {
+  DCHECK_OPCODE(LGB);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  // Miscellaneous Loads and Stores
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  intptr_t addr = x2_val + b2_val + d2;
+  int64_t mem_val = ReadB(addr);
+  set_register(r1, mem_val);
+  return length;
+}
 
-EVALUATE(MSY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LHY) {
+  DCHECK_OPCODE(LHY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  // Miscellaneous Loads and Stores
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  intptr_t addr = x2_val + b2_val + d2;
+  int32_t result = static_cast<int32_t>(ReadH(addr, instr));
+  set_low_register(r1, result);
+  return length;
+}
 
-EVALUATE(NY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CHY) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CLY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(AHY) {
+  DCHECK_OPCODE(AHY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t d2_val = d2;
+  int32_t mem_val =
+      static_cast<int32_t>(ReadH(b2_val + d2_val + x2_val, instr));
+  int32_t alu_out = 0;
+  bool isOF = false;
+  alu_out = r1_val + mem_val;
+  isOF = CheckOverflowForIntAdd(r1_val, mem_val, int32_t);
+  set_low_register(r1, alu_out);
+  SetS390ConditionCode<int32_t>(alu_out, 0);
+  SetS390OverflowCode(isOF);
+  return length;
+}
 
-EVALUATE(OY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SHY) {
+  DCHECK_OPCODE(SHY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t d2_val = d2;
+  int32_t mem_val =
+      static_cast<int32_t>(ReadH(b2_val + d2_val + x2_val, instr));
+  int32_t alu_out = 0;
+  bool isOF = false;
+  alu_out = r1_val - mem_val;
+  isOF = CheckOverflowForIntSub(r1_val, mem_val, int64_t);
+  set_low_register(r1, alu_out);
+  SetS390ConditionCode<int32_t>(alu_out, 0);
+  SetS390OverflowCode(isOF);
+  return length;
+}
 
-EVALUATE(XY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MHY) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(NG) {
+  DCHECK_OPCODE(NG);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t alu_out = get_register(r1);
+  int64_t mem_val = ReadDW(b2_val + x2_val + d2);
+  alu_out &= mem_val;
+  SetS390BitWiseConditionCode<uint32_t>(alu_out);
+  set_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(AY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(OG) {
+  DCHECK_OPCODE(OG);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t alu_out = get_register(r1);
+  int64_t mem_val = ReadDW(b2_val + x2_val + d2);
+  alu_out |= mem_val;
+  SetS390BitWiseConditionCode<uint32_t>(alu_out);
+  set_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(SY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(XG) {
+  DCHECK_OPCODE(XG);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t alu_out = get_register(r1);
+  int64_t mem_val = ReadDW(b2_val + x2_val + d2);
+  alu_out ^= mem_val;
+  SetS390BitWiseConditionCode<uint32_t>(alu_out);
+  set_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(MFY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LGAT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ALY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MLG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SLY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(DLG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STHY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ALCG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LAY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SLBG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STCY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STPQ) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ICY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LPQ) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LAEY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLGH) {
+  DCHECK_OPCODE(LLGH);
+  // Load Logical Halfword
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t d2_val = d2;
+  uint16_t mem_val = ReadHU(b2_val + d2_val + x2_val, instr);
+  set_register(r1, mem_val);
+  return length;
+}
 
-EVALUATE(LB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLH) {
+  DCHECK_OPCODE(LLH);
+  // Load Logical Halfword
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t d2_val = d2;
+  uint16_t mem_val = ReadHU(b2_val + d2_val + x2_val, instr);
+  set_low_register(r1, mem_val);
+  return length;
+}
 
-EVALUATE(LGB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ML) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LHY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(DL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CHY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ALC) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(AHY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SLB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SHY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLGTAT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MHY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLGFAT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(NG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LAT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(OG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LBH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(XG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLCH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LGAT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STCH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MLG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LHH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(DLG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LLHH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ALCG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STHH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SLBG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LFHAT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STPQ) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LFH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LPQ) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STFH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LLGH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CHF) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LLH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MVCDK) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ML) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MVHHI) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(DL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MVGHI) {
+  DCHECK_OPCODE(MVGHI);
+  // Move Integer (64)
+  DECODE_SIL_INSTRUCTION(b1, d1, i2);
+  int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+  intptr_t src_addr = b1_val + d1;
+  WriteDW(src_addr, i2);
+  return length;
+}
 
-EVALUATE(ALC) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MVHI) {
+  DCHECK_OPCODE(MVHI);
+  // Move Integer (32)
+  DECODE_SIL_INSTRUCTION(b1, d1, i2);
+  int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+  intptr_t src_addr = b1_val + d1;
+  WriteW(src_addr, i2, instr);
+  return length;
+}
 
-EVALUATE(SLB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CHHSI) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LLGTAT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGHSI) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LLGFAT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CHSI) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LAT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CLFHSI) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LBH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TBEGIN) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LLCH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TBEGINC) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STCH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LMG) {
+  DCHECK_OPCODE(LMG);
+  // Load Multiple 64-bits.
+  DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+  int rb = b2;
+  int offset = d2;
 
-EVALUATE(LHH) { return DecodeInstructionOriginal(instr); }
+  // Regs roll around if r3 is less than r1.
+  // Artificially increase r3 by 16 so we can calculate
+  // the number of regs loaded properly.
+  if (r3 < r1) r3 += 16;
 
-EVALUATE(LLHH) { return DecodeInstructionOriginal(instr); }
+  int64_t rb_val = (rb == 0) ? 0 : get_register(rb);
 
-EVALUATE(STHH) { return DecodeInstructionOriginal(instr); }
+  // Load each register in ascending order.
+  for (int i = 0; i <= r3 - r1; i++) {
+    int64_t value = ReadDW(rb_val + offset + 8 * i);
+    set_register((r1 + i) % 16, value);
+  }
+  return length;
+}
 
-EVALUATE(LFHAT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SRAG) {
+  DCHECK_OPCODE(SRAG);
+  // 64-bit non-clobbering shift-left/right arithmetic
+  DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+  // only takes rightmost 6 bits
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int shiftBits = (b2_val + d2) & 0x3F;
+  int64_t r3_val = get_register(r3);
+  intptr_t alu_out = 0;
+  bool isOF = false;
+  alu_out = r3_val >> shiftBits;
+  set_register(r1, alu_out);
+  SetS390ConditionCode<intptr_t>(alu_out, 0);
+  SetS390OverflowCode(isOF);
+  return length;
+}
 
-EVALUATE(LFH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SLAG) {
+  DCHECK_OPCODE(SLAG);
+  // 64-bit non-clobbering shift-left/right arithmetic
+  DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+  // only takes rightmost 6 bits
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int shiftBits = (b2_val + d2) & 0x3F;
+  int64_t r3_val = get_register(r3);
+  intptr_t alu_out = 0;
+  bool isOF = false;
+  isOF = CheckOverflowForShiftLeft(r3_val, shiftBits);
+  alu_out = r3_val << shiftBits;
+  set_register(r1, alu_out);
+  SetS390ConditionCode<intptr_t>(alu_out, 0);
+  SetS390OverflowCode(isOF);
+  return length;
+}
 
-EVALUATE(STFH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SRLG) {
+  DCHECK_OPCODE(SRLG);
+  // For SLLG/SRLG, the 64-bit third operand is shifted the number
+  // of bits specified by the second-operand address, and the result is
+  // placed at the first-operand location. Except for when the R1 and R3
+  // fields designate the same register, the third operand remains
+  // unchanged in general register R3.
+  DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+  // only takes rightmost 6 bits
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int shiftBits = (b2_val + d2) & 0x3F;
+  // unsigned
+  uint64_t r3_val = get_register(r3);
+  uint64_t alu_out = 0;
+  alu_out = r3_val >> shiftBits;
+  set_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(CHF) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SLLG) {
+  DCHECK_OPCODE(SLLG);
+  // For SLLG/SRLG, the 64-bit third operand is shifted the number
+  // of bits specified by the second-operand address, and the result is
+  // placed at the first-operand location. Except for when the R1 and R3
+  // fields designate the same register, the third operand remains
+  // unchanged in general register R3.
+  DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+  // only takes rightmost 6 bits
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int shiftBits = (b2_val + d2) & 0x3F;
+  // unsigned
+  uint64_t r3_val = get_register(r3);
+  uint64_t alu_out = 0;
+  alu_out = r3_val << shiftBits;
+  set_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(MVCDK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CSY) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MVHHI) { return DecodeInstructionOriginal(instr); }
+EVALUATE(RLLG) {
+  DCHECK_OPCODE(RLLG);
+  // For RLLG, the 64-bit third operand is rotated left the number
+  // of bits specified by the second-operand address, and the result is
+  // placed at the first-operand location. Except for when the R1 and R3
+  // fields designate the same register, the third operand remains
+  // unchanged in general register R3.
+  DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+  // only takes rightmost 6 bits
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int shiftBits = (b2_val + d2) & 0x3F;
+  // unsigned
+  uint64_t r3_val = get_register(r3);
+  uint64_t alu_out = 0;
+  uint64_t rotateBits = r3_val >> (64 - shiftBits);
+  alu_out = (r3_val << shiftBits) | (rotateBits);
+  set_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(MVGHI) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STMG) {
+  DCHECK_OPCODE(STMG);
+  DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+  int rb = b2;
+  int offset = d2;
 
-EVALUATE(MVHI) { return DecodeInstructionOriginal(instr); }
+  // Regs roll around if r3 is less than r1.
+  // Artificially increase r3 by 16 so we can calculate
+  // the number of regs stored properly.
+  if (r3 < r1) r3 += 16;
 
-EVALUATE(CHHSI) { return DecodeInstructionOriginal(instr); }
+  int64_t rb_val = (rb == 0) ? 0 : get_register(rb);
 
-EVALUATE(CGHSI) { return DecodeInstructionOriginal(instr); }
+  // Store each register in ascending order.
+  for (int i = 0; i <= r3 - r1; i++) {
+    int64_t value = get_register((r1 + i) % 16);
+    WriteDW(rb_val + offset + 8 * i, value);
+  }
+  return length;
+}
 
-EVALUATE(CHSI) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STMH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CLFHSI) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STCMH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TBEGIN) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STCMY) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TBEGINC) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CDSY) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LMG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CDSG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SRAG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(BXHG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SLAG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(BXLEG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SRLG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ECAG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SLLG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TMY) {
+  DCHECK_OPCODE(TMY);
+  // Test Under Mask (Mem - Imm) (8)
+  DECODE_SIY_INSTRUCTION(b1, d1, i2);
+  int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+  intptr_t d1_val = d1;
+  intptr_t addr = b1_val + d1_val;
+  uint8_t mem_val = ReadB(addr);
+  uint8_t imm_val = i2;
+  uint8_t selected_bits = mem_val & imm_val;
+  // CC0: Selected bits are zero
+  // CC1: Selected bits mixed zeros and ones
+  // CC3: Selected bits all ones
+  if (0 == selected_bits) {
+    condition_reg_ = CC_EQ;  // CC0
+  } else if (selected_bits == imm_val) {
+    condition_reg_ = 0x1;  // CC3
+  } else {
+    condition_reg_ = 0x4;  // CC1
+  }
+  return length;
+}
 
-EVALUATE(CSY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MVIY) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(RLLG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(NIY) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STMG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CLIY) {
+  DCHECK_OPCODE(CLIY);
+  DECODE_SIY_INSTRUCTION(b1, d1, i2);
+  // Compare Immediate (Mem - Imm) (8)
+  int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+  intptr_t d1_val = d1;
+  intptr_t addr = b1_val + d1_val;
+  uint8_t mem_val = ReadB(addr);
+  uint8_t imm_val = i2;
+  SetS390ConditionCode<uint8_t>(mem_val, imm_val);
+  return length;
+}
 
-EVALUATE(STMH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(OIY) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STCMH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(XIY) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STCMY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ASI) {
+  DCHECK_OPCODE(ASI);
+  // TODO(bcleung): Change all fooInstr->I2Value() to template functions.
+  // The below static cast to 8 bit and then to 32 bit is necessary
+  // because siyInstr->I2Value() returns a uint8_t, which a direct
+  // cast to int32_t could incorrectly interpret.
+  DECODE_SIY_INSTRUCTION(b1, d1, i2_unsigned);
+  int8_t i2_8bit = static_cast<int8_t>(i2_unsigned);
+  int32_t i2 = static_cast<int32_t>(i2_8bit);
+  intptr_t b1_val = (b1 == 0) ? 0 : get_register(b1);
 
-EVALUATE(CDSY) { return DecodeInstructionOriginal(instr); }
+  int d1_val = d1;
+  intptr_t addr = b1_val + d1_val;
 
-EVALUATE(CDSG) { return DecodeInstructionOriginal(instr); }
+  int32_t mem_val = ReadW(addr, instr);
+  bool isOF = CheckOverflowForIntAdd(mem_val, i2, int32_t);
+  int32_t alu_out = mem_val + i2;
+  SetS390ConditionCode<int32_t>(alu_out, 0);
+  SetS390OverflowCode(isOF);
+  WriteW(addr, alu_out, instr);
+  return length;
+}
 
-EVALUATE(BXHG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ALSI) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(BXLEG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(AGSI) {
+  DCHECK_OPCODE(AGSI);
+  // TODO(bcleung): Change all fooInstr->I2Value() to template functions.
+  // The below static cast to 8 bit and then to 32 bit is necessary
+  // because siyInstr->I2Value() returns a uint8_t, which a direct
+  // cast to int32_t could incorrectly interpret.
+  DECODE_SIY_INSTRUCTION(b1, d1, i2_unsigned);
+  int8_t i2_8bit = static_cast<int8_t>(i2_unsigned);
+  int64_t i2 = static_cast<int64_t>(i2_8bit);
+  intptr_t b1_val = (b1 == 0) ? 0 : get_register(b1);
 
-EVALUATE(ECAG) { return DecodeInstructionOriginal(instr); }
+  int d1_val = d1;
+  intptr_t addr = b1_val + d1_val;
 
-EVALUATE(TMY) { return DecodeInstructionOriginal(instr); }
+  int64_t mem_val = ReadDW(addr);
+  int isOF = CheckOverflowForIntAdd(mem_val, i2, int64_t);
+  int64_t alu_out = mem_val + i2;
+  SetS390ConditionCode<uint64_t>(alu_out, 0);
+  SetS390OverflowCode(isOF);
+  WriteDW(addr, alu_out);
+  return length;
+}
 
-EVALUATE(MVIY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ALGSI) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(NIY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ICMH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CLIY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ICMY) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(OIY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MVCLU) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(XIY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CLCLU) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ASI) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STMY) {
+  DCHECK_OPCODE(STMY);
+  DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+  // Load/Store Multiple (32)
+  int offset = d2;
 
-EVALUATE(ALSI) { return DecodeInstructionOriginal(instr); }
+  // Regs roll around if r3 is less than r1.
+  // Artificially increase r3 by 16 so we can calculate
+  // the number of regs stored properly.
+  if (r3 < r1) r3 += 16;
 
-EVALUATE(AGSI) { return DecodeInstructionOriginal(instr); }
+  int32_t b2_val = (b2 == 0) ? 0 : get_low_register<int32_t>(b2);
 
-EVALUATE(ALGSI) { return DecodeInstructionOriginal(instr); }
+  // Store each register in ascending order.
+  for (int i = 0; i <= r3 - r1; i++) {
+    int32_t value = get_low_register<int32_t>((r1 + i) % 16);
+    WriteW(b2_val + offset + 4 * i, value, instr);
+  }
+  return length;
+}
 
-EVALUATE(ICMH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LMH) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ICMY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LMY) {
+  DCHECK_OPCODE(LMY);
+  DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+  // Load/Store Multiple (32)
+  int offset = d2;
 
-EVALUATE(MVCLU) { return DecodeInstructionOriginal(instr); }
+  // Regs roll around if r3 is less than r1.
+  // Artificially increase r3 by 16 so we can calculate
+  // the number of regs loaded properly.
+  if (r3 < r1) r3 += 16;
 
-EVALUATE(CLCLU) { return DecodeInstructionOriginal(instr); }
+  int32_t b2_val = (b2 == 0) ? 0 : get_low_register<int32_t>(b2);
 
-EVALUATE(STMY) { return DecodeInstructionOriginal(instr); }
+  // Load each register in ascending order.
+  for (int i = 0; i <= r3 - r1; i++) {
+    int32_t value = ReadW(b2_val + offset + 4 * i, instr);
+    set_low_register((r1 + i) % 16, value);
+  }
+  return length;
+}
 
-EVALUATE(LMH) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TP) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LMY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SRAK) {
+  DCHECK_OPCODE(SRAK);
+  DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+  // 32-bit non-clobbering shift-left/right arithmetic
+  // only takes rightmost 6 bits
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int shiftBits = (b2_val + d2) & 0x3F;
+  int32_t r3_val = get_low_register<int32_t>(r3);
+  int32_t alu_out = 0;
+  bool isOF = false;
+  alu_out = r3_val >> shiftBits;
+  set_low_register(r1, alu_out);
+  SetS390ConditionCode<int32_t>(alu_out, 0);
+  SetS390OverflowCode(isOF);
+  return length;
+}
 
-EVALUATE(TP) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SLAK) {
+  DCHECK_OPCODE(SLAK);
+  DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+  // 32-bit non-clobbering shift-left/right arithmetic
+  // only takes rightmost 6 bits
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int shiftBits = (b2_val + d2) & 0x3F;
+  int32_t r3_val = get_low_register<int32_t>(r3);
+  int32_t alu_out = 0;
+  bool isOF = false;
+  isOF = CheckOverflowForShiftLeft(r3_val, shiftBits);
+  alu_out = r3_val << shiftBits;
+  set_low_register(r1, alu_out);
+  SetS390ConditionCode<int32_t>(alu_out, 0);
+  SetS390OverflowCode(isOF);
+  return length;
+}
 
-EVALUATE(SRAK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SRLK) {
+  DCHECK_OPCODE(SRLK);
+  // For SLLK/SRLK, the 32-bit third operand is shifted the number
+  // of bits specified by the second-operand address, and the result is
+  // placed at the first-operand location. Except for when the R1 and R3
+  // fields designate the same register, the third operand remains
+  // unchanged in general register R3.
+  DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+  // only takes rightmost 6 bits
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int shiftBits = (b2_val + d2) & 0x3F;
+  // unsigned
+  uint32_t r3_val = get_low_register<uint32_t>(r3);
+  uint32_t alu_out = 0;
+  alu_out = r3_val >> shiftBits;
+  set_low_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(SLAK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SLLK) {
+  DCHECK_OPCODE(SLLK);
+  // For SLLK/SRLK, the 32-bit third operand is shifted the number
+  // of bits specified by the second-operand address, and the result is
+  // placed at the first-operand location. Except for when the R1 and R3
+  // fields designate the same register, the third operand remains
+  // unchanged in general register R3.
+  DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+  // only takes rightmost 6 bits
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int shiftBits = (b2_val + d2) & 0x3F;
+  // unsigned
+  uint32_t r3_val = get_low_register<uint32_t>(r3);
+  uint32_t alu_out = 0;
+  alu_out = r3_val << shiftBits;
+  set_low_register(r1, alu_out);
+  return length;
+}
 
-EVALUATE(SRLK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LOCG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SLLK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STOCG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LOCG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LANG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STOCG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LAOG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LANG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LAXG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LAOG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LAAG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LAXG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LAALG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LAAG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LOC) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LAALG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STOC) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LOC) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LAN) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(STOC) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LAO) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LAN) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LAX) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LAO) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LAA) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LAX) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LAAL) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LAA) { return DecodeInstructionOriginal(instr); }
+EVALUATE(BRXHG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LAAL) { return DecodeInstructionOriginal(instr); }
+EVALUATE(BRXLG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(BRXHG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(RISBLG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(BRXLG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(RNSBG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(RISBLG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ROSBG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(RNSBG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(RXSBG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ROSBG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(RISBGN) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(RXSBG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(RISBHG) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(RISBGN) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGRJ) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(RISBHG) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGIT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CGRJ) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CIT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CGIT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CLFIT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CIT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGIJ) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CLFIT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CIJ) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CGIJ) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ALHSIK) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CIJ) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ALGHSIK) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ALHSIK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGRB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(ALGHSIK) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CGIB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CGRB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CIB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CGIB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LDEB) {
+  DCHECK_OPCODE(LDEB);
+  // Load Float
+  DECODE_RXE_INSTRUCTION(r1, b2, x2, d2);
+  int rb = b2;
+  int rx = x2;
+  int offset = d2;
+  int64_t rb_val = (rb == 0) ? 0 : get_register(rb);
+  int64_t rx_val = (rx == 0) ? 0 : get_register(rx);
+  double ret =
+      static_cast<double>(*reinterpret_cast<float*>(rx_val + rb_val + offset));
+  set_d_register_from_double(r1, ret);
+  return length;
+}
 
-EVALUATE(CIB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LXDB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LDEB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LXEB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LXDB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MXDB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LXEB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(KEB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MXDB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CEB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(KEB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(AEB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CEB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SEB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(AEB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MDEB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SEB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(DEB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MDEB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MAEB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(DEB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MSEB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MAEB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TCEB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MSEB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TCDB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TCEB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TCXB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TCDB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SQEB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TCXB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SQDB) {
+  DCHECK_OPCODE(SQDB);
+  DECODE_RXE_INSTRUCTION(r1, b2, x2, d2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t d2_val = d2;
+  double r1_val = get_double_from_d_register(r1);
+  double dbl_val = ReadDouble(b2_val + x2_val + d2_val);
+  r1_val = std::sqrt(dbl_val);
+  set_d_register_from_double(r1, r1_val);
+  return length;
+}
 
-EVALUATE(SQEB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MEEB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SQDB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(KDB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MEEB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CDB) {
+  DCHECK_OPCODE(CDB);
 
-EVALUATE(KDB) { return DecodeInstructionOriginal(instr); }
+  DECODE_RXE_INSTRUCTION(r1, b2, x2, d2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t d2_val = d2;
+  double r1_val = get_double_from_d_register(r1);
+  double dbl_val = ReadDouble(b2_val + x2_val + d2_val);
+  SetS390ConditionCode<double>(r1_val, dbl_val);
+  return length;
+}
 
-EVALUATE(CDB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(ADB) {
+  DCHECK_OPCODE(ADB);
 
-EVALUATE(ADB) { return DecodeInstructionOriginal(instr); }
+  DECODE_RXE_INSTRUCTION(r1, b2, x2, d2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t d2_val = d2;
+  double r1_val = get_double_from_d_register(r1);
+  double dbl_val = ReadDouble(b2_val + x2_val + d2_val);
+  r1_val += dbl_val;
+  set_d_register_from_double(r1, r1_val);
+  SetS390ConditionCode<double>(r1_val, 0);
+  return length;
+}
 
-EVALUATE(SDB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SDB) {
+  DCHECK_OPCODE(SDB);
+  DECODE_RXE_INSTRUCTION(r1, b2, x2, d2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t d2_val = d2;
+  double r1_val = get_double_from_d_register(r1);
+  double dbl_val = ReadDouble(b2_val + x2_val + d2_val);
+  r1_val -= dbl_val;
+  set_d_register_from_double(r1, r1_val);
+  SetS390ConditionCode<double>(r1_val, 0);
+  return length;
+}
 
-EVALUATE(MDB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MDB) {
+  DCHECK_OPCODE(MDB);
+  DECODE_RXE_INSTRUCTION(r1, b2, x2, d2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t d2_val = d2;
+  double r1_val = get_double_from_d_register(r1);
+  double dbl_val = ReadDouble(b2_val + x2_val + d2_val);
+  r1_val *= dbl_val;
+  set_d_register_from_double(r1, r1_val);
+  SetS390ConditionCode<double>(r1_val, 0);
+  return length;
+}
 
-EVALUATE(DDB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(DDB) {
+  DCHECK_OPCODE(DDB);
+  DECODE_RXE_INSTRUCTION(r1, b2, x2, d2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t d2_val = d2;
+  double r1_val = get_double_from_d_register(r1);
+  double dbl_val = ReadDouble(b2_val + x2_val + d2_val);
+  r1_val /= dbl_val;
+  set_d_register_from_double(r1, r1_val);
+  SetS390ConditionCode<double>(r1_val, 0);
+  return length;
+}
 
-EVALUATE(MADB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MADB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(MSDB) { return DecodeInstructionOriginal(instr); }
+EVALUATE(MSDB) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SLDT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SLDT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SRDT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SRDT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SLXT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SLXT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(SRXT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(SRXT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TDCET) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TDCET) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TDGET) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TDGET) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TDCDT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TDCDT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TDGDT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TDGDT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TDCXT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TDCXT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(TDGXT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(TDGXT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(LEY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LEY) {
+  DCHECK_OPCODE(LEY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  // Miscellaneous Loads and Stores
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  intptr_t addr = x2_val + b2_val + d2;
+  float float_val = *reinterpret_cast<float*>(addr);
+  set_d_register_from_float32(r1, float_val);
+  return length;
+}
 
-EVALUATE(LDY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(LDY) {
+  DCHECK_OPCODE(LDY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  // Miscellaneous Loads and Stores
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  intptr_t addr = x2_val + b2_val + d2;
+  uint64_t dbl_val = *reinterpret_cast<uint64_t*>(addr);
+  set_d_register(r1, dbl_val);
+  return length;
+}
 
-EVALUATE(STEY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STEY) {
+  DCHECK_OPCODE(STEY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  // Miscellaneous Loads and Stores
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  intptr_t addr = x2_val + b2_val + d2;
+  int64_t frs_val = get_d_register(r1) >> 32;
+  WriteW(addr, static_cast<int32_t>(frs_val), instr);
+  return length;
+}
 
-EVALUATE(STDY) { return DecodeInstructionOriginal(instr); }
+EVALUATE(STDY) {
+  DCHECK_OPCODE(STDY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  // Miscellaneous Loads and Stores
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  intptr_t addr = x2_val + b2_val + d2;
+  int64_t frs_val = get_d_register(r1);
+  WriteDW(addr, frs_val);
+  return length;
+}
 
-EVALUATE(CZDT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CZDT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CZXT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CZXT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CDZT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CDZT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
-EVALUATE(CXZT) { return DecodeInstructionOriginal(instr); }
+EVALUATE(CXZT) {
+  UNIMPLEMENTED();
+  USE(instr);
+  return 0;
+}
 
 #undef EVALUATE
 
diff --git a/src/s390/simulator-s390.h b/src/s390/simulator-s390.h
index 6e82c9a..b9ee25d 100644
--- a/src/s390/simulator-s390.h
+++ b/src/s390/simulator-s390.h
@@ -64,7 +64,7 @@
 // Running with a simulator.
 
 #include "src/assembler.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
 #include "src/s390/constants-s390.h"
 
 namespace v8 {
@@ -211,7 +211,7 @@
   // Call on program start.
   static void Initialize(Isolate* isolate);
 
-  static void TearDown(HashMap* i_cache, Redirection* first);
+  static void TearDown(base::HashMap* i_cache, Redirection* first);
 
   // V8 generally calls into generated JS code with 5 parameters and into
   // generated RegExp code with 7 parameters. This is a convenience function,
@@ -233,8 +233,7 @@
   char* last_debugger_input() { return last_debugger_input_; }
 
   // ICache checking.
-  static void FlushICache(v8::internal::HashMap* i_cache, void* start,
-                          size_t size);
+  static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
 
   // Returns true if pc register contains one of the 'special_values' defined
   // below (bad_lr, end_sim_pc).
@@ -444,10 +443,9 @@
   void ExecuteInstruction(Instruction* instr, bool auto_incr_pc = true);
 
   // ICache.
-  static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
-  static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
-                           int size);
-  static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+  static void CheckICache(base::HashMap* i_cache, Instruction* instr);
+  static void FlushOnePage(base::HashMap* i_cache, intptr_t start, int size);
+  static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
 
   // Runtime call support.
   static void* RedirectExternalReference(
@@ -482,7 +480,7 @@
   char* last_debugger_input_;
 
   // Icache simulation
-  v8::internal::HashMap* i_cache_;
+  base::HashMap* i_cache_;
 
   // Registered breakpoints.
   Instruction* break_pc_;
diff --git a/src/snapshot/code-serializer.cc b/src/snapshot/code-serializer.cc
index 1a2e077..4229607 100644
--- a/src/snapshot/code-serializer.cc
+++ b/src/snapshot/code-serializer.cc
@@ -7,7 +7,6 @@
 #include "src/code-stubs.h"
 #include "src/log.h"
 #include "src/macro-assembler.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/snapshot/deserializer.h"
 #include "src/version.h"
 
@@ -27,15 +26,14 @@
   }
 
   // Serialize code object.
-  SnapshotByteSink sink(info->code()->CodeSize() * 2);
-  CodeSerializer cs(isolate, &sink, *source);
+  CodeSerializer cs(isolate, *source);
   DisallowHeapAllocation no_gc;
   Object** location = Handle<Object>::cast(info).location();
   cs.VisitPointer(location);
   cs.SerializeDeferredObjects();
   cs.Pad();
 
-  SerializedCodeData data(sink.data(), cs);
+  SerializedCodeData data(cs.sink()->data(), &cs);
   ScriptData* script_data = data.GetScriptData();
 
   if (FLAG_profile_deserialization) {
@@ -49,13 +47,15 @@
 
 void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
                                      WhereToPoint where_to_point, int skip) {
+  if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
+
   int root_index = root_index_map_.Lookup(obj);
   if (root_index != RootIndexMap::kInvalidRootIndex) {
     PutRoot(root_index, obj, how_to_code, where_to_point, skip);
     return;
   }
 
-  if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
+  if (SerializeBackReference(obj, how_to_code, where_to_point, skip)) return;
 
   FlushSkip(skip);
 
@@ -106,7 +106,7 @@
                                       HowToCode how_to_code,
                                       WhereToPoint where_to_point) {
   // Object has not yet been serialized.  Serialize it here.
-  ObjectSerializer serializer(this, heap_object, sink_, how_to_code,
+  ObjectSerializer serializer(this, heap_object, &sink_, how_to_code,
                               where_to_point);
   serializer.Serialize();
 }
@@ -124,8 +124,8 @@
            isolate()->builtins()->name(builtin_index));
   }
 
-  sink_->Put(kBuiltin + how_to_code + where_to_point, "Builtin");
-  sink_->PutInt(builtin_index, "builtin_index");
+  sink_.Put(kBuiltin + how_to_code + where_to_point, "Builtin");
+  sink_.PutInt(builtin_index, "builtin_index");
 }
 
 void CodeSerializer::SerializeCodeStub(Code* code_stub, HowToCode how_to_code,
@@ -185,15 +185,14 @@
   }
   result->set_deserialized(true);
 
-  if (isolate->logger()->is_logging_code_events() ||
-      isolate->cpu_profiler()->is_profiling()) {
+  if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
     String* name = isolate->heap()->empty_string();
     if (result->script()->IsScript()) {
       Script* script = Script::cast(result->script());
       if (script->name()->IsString()) name = String::cast(script->name());
     }
-    isolate->logger()->CodeCreateEvent(Logger::SCRIPT_TAG,
-                                       result->abstract_code(), *result, name);
+    PROFILE(isolate, CodeCreateEvent(CodeEventListener::SCRIPT_TAG,
+                                     result->abstract_code(), *result, name));
   }
   return scope.CloseAndEscape(result);
 }
@@ -237,13 +236,13 @@
   DISALLOW_COPY_AND_ASSIGN(Checksum);
 };
 
-SerializedCodeData::SerializedCodeData(const List<byte>& payload,
-                                       const CodeSerializer& cs) {
+SerializedCodeData::SerializedCodeData(const List<byte>* payload,
+                                       const CodeSerializer* cs) {
   DisallowHeapAllocation no_gc;
-  const List<uint32_t>* stub_keys = cs.stub_keys();
+  const List<uint32_t>* stub_keys = cs->stub_keys();
 
   List<Reservation> reservations;
-  cs.EncodeReservations(&reservations);
+  cs->EncodeReservations(&reservations);
 
   // Calculate sizes.
   int reservation_size = reservations.length() * kInt32Size;
@@ -251,23 +250,23 @@
   int stub_keys_size = stub_keys->length() * kInt32Size;
   int payload_offset = kHeaderSize + reservation_size + stub_keys_size;
   int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
-  int size = padded_payload_offset + payload.length();
+  int size = padded_payload_offset + payload->length();
 
   // Allocate backing store and create result data.
   AllocateData(size);
 
   // Set header values.
-  SetMagicNumber(cs.isolate());
+  SetMagicNumber(cs->isolate());
   SetHeaderValue(kVersionHashOffset, Version::Hash());
-  SetHeaderValue(kSourceHashOffset, SourceHash(cs.source()));
+  SetHeaderValue(kSourceHashOffset, SourceHash(cs->source()));
   SetHeaderValue(kCpuFeaturesOffset,
                  static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
   SetHeaderValue(kFlagHashOffset, FlagList::Hash());
   SetHeaderValue(kNumReservationsOffset, reservations.length());
   SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
-  SetHeaderValue(kPayloadLengthOffset, payload.length());
+  SetHeaderValue(kPayloadLengthOffset, payload->length());
 
-  Checksum checksum(payload.ToConstVector());
+  Checksum checksum(payload->ToConstVector());
   SetHeaderValue(kChecksum1Offset, checksum.a());
   SetHeaderValue(kChecksum2Offset, checksum.b());
 
@@ -282,8 +281,8 @@
   memset(data_ + payload_offset, 0, padded_payload_offset - payload_offset);
 
   // Copy serialized data.
-  CopyBytes(data_ + padded_payload_offset, payload.begin(),
-            static_cast<size_t>(payload.length()));
+  CopyBytes(data_ + padded_payload_offset, payload->begin(),
+            static_cast<size_t>(payload->length()));
 }
 
 SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
diff --git a/src/snapshot/code-serializer.h b/src/snapshot/code-serializer.h
index 8ed4cf6..1948939 100644
--- a/src/snapshot/code-serializer.h
+++ b/src/snapshot/code-serializer.h
@@ -28,8 +28,8 @@
   const List<uint32_t>* stub_keys() const { return &stub_keys_; }
 
  private:
-  CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source)
-      : Serializer(isolate, sink), source_(source) {
+  CodeSerializer(Isolate* isolate, String* source)
+      : Serializer(isolate), source_(source) {
     reference_map_.AddAttachedReference(source);
   }
 
@@ -60,7 +60,7 @@
                                             String* source);
 
   // Used when producing.
-  SerializedCodeData(const List<byte>& payload, const CodeSerializer& cs);
+  SerializedCodeData(const List<byte>* payload, const CodeSerializer* cs);
 
   // Return ScriptData object and relinquish ownership over it to the caller.
   ScriptData* GetScriptData();
diff --git a/src/snapshot/deserializer.cc b/src/snapshot/deserializer.cc
index 88820ae..68d3489 100644
--- a/src/snapshot/deserializer.cc
+++ b/src/snapshot/deserializer.cc
@@ -31,9 +31,7 @@
 void Deserializer::FlushICacheForNewIsolate() {
   DCHECK(!deserializing_user_code_);
   // The entire isolate is newly deserialized. Simply flush all code pages.
-  PageIterator it(isolate_->heap()->code_space());
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *isolate_->heap()->code_space()) {
     Assembler::FlushICache(isolate_, p->area_start(),
                            p->area_end() - p->area_start());
   }
@@ -101,10 +99,6 @@
         isolate_->heap()->undefined_value());
   }
 
-  // Update data pointers to the external strings containing natives sources.
-  Natives::UpdateSourceCache(isolate_->heap());
-  ExtraNatives::UpdateSourceCache(isolate_->heap());
-
   // Issue code events for newly deserialized code objects.
   LOG_CODE_EVENT(isolate_, LogCodeObjects());
   LOG_CODE_EVENT(isolate_, LogBytecodeHandlers());
@@ -481,6 +475,7 @@
         Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id); \
         new_object = isolate->heap()->root(root_index);                        \
         emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
+        hot_objects_.Add(HeapObject::cast(new_object));                        \
       } else if (where == kPartialSnapshotCache) {                             \
         int cache_index = source_.GetInt();                                    \
         new_object = isolate->partial_snapshot_cache()->at(cache_index);       \
@@ -507,12 +502,11 @@
         emit_write_barrier = false;                                            \
       }                                                                        \
       if (within == kInnerPointer) {                                           \
-        if (space_number != CODE_SPACE || new_object->IsCode()) {              \
-          Code* new_code_object = reinterpret_cast<Code*>(new_object);         \
+        if (new_object->IsCode()) {                                            \
+          Code* new_code_object = Code::cast(new_object);                      \
           new_object =                                                         \
               reinterpret_cast<Object*>(new_code_object->instruction_start()); \
         } else {                                                               \
-          DCHECK(space_number == CODE_SPACE);                                  \
           Cell* cell = Cell::cast(new_object);                                 \
           new_object = reinterpret_cast<Object*>(cell->ValueAddress());        \
         }                                                                      \
@@ -579,6 +573,9 @@
       // pointer because it points at the entry point, not at the start of the
       // code object.
       SINGLE_CASE(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
+      // Support for pointers into a cell. It's an inner pointer because it
+      // points directly at the value field, not the start of the cell object.
+      SINGLE_CASE(kNewObject, kPlain, kInnerPointer, OLD_SPACE)
       // Deserialize a new code object and write a pointer to its first
       // instruction to the current code object.
       ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
@@ -605,8 +602,12 @@
       // object.
       ALL_SPACES(kBackref, kFromCode, kInnerPointer)
       ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
-      ALL_SPACES(kBackref, kPlain, kInnerPointer)
-      ALL_SPACES(kBackrefWithSkip, kPlain, kInnerPointer)
+      // Support for direct instruction pointers in functions.
+      SINGLE_CASE(kBackref, kPlain, kInnerPointer, CODE_SPACE)
+      SINGLE_CASE(kBackrefWithSkip, kPlain, kInnerPointer, CODE_SPACE)
+      // Support for pointers into a cell.
+      SINGLE_CASE(kBackref, kPlain, kInnerPointer, OLD_SPACE)
+      SINGLE_CASE(kBackrefWithSkip, kPlain, kInnerPointer, OLD_SPACE)
       // Find an object in the roots array and write a pointer to it to the
       // current object.
       SINGLE_CASE(kRootArray, kPlain, kStartOfObject, 0)
@@ -767,9 +768,8 @@
         int index = data & kHotObjectMask;
         Object* hot_object = hot_objects_.Get(index);
         UnalignedCopy(current, &hot_object);
-        if (write_barrier_needed) {
+        if (write_barrier_needed && isolate->heap()->InNewSpace(hot_object)) {
           Address current_address = reinterpret_cast<Address>(current);
-          SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address));
           isolate->heap()->RecordWrite(
               HeapObject::FromAddress(current_object_address),
               static_cast<int>(current_address - current_object_address),
diff --git a/src/snapshot/mksnapshot.cc b/src/snapshot/mksnapshot.cc
index 9fe611a..f4362e5 100644
--- a/src/snapshot/mksnapshot.cc
+++ b/src/snapshot/mksnapshot.cc
@@ -79,7 +79,7 @@
   }
 
   void WriteData(const i::Vector<const i::byte>& blob) const {
-    fprintf(fp_, "static const byte blob_data[] __attribute__((aligned(8))) = {\n");
+    fprintf(fp_, "static const byte blob_data[] = {\n");
     WriteSnapshotData(blob);
     fprintf(fp_, "};\n");
     fprintf(fp_, "static const int blob_size = %d;\n", blob.length());
@@ -150,7 +150,7 @@
   }
 
   i::CpuFeatures::Probe(true);
-  V8::InitializeICU();
+  V8::InitializeICUDefaultLocation(argv[0]);
   v8::Platform* platform = v8::platform::CreateDefaultPlatform();
   v8::V8::InitializePlatform(platform);
   v8::V8::Initialize();
diff --git a/src/snapshot/natives-common.cc b/src/snapshot/natives-common.cc
index f30e794..338b92b 100644
--- a/src/snapshot/natives-common.cc
+++ b/src/snapshot/natives-common.cc
@@ -34,24 +34,5 @@
   return heap->experimental_extra_natives_source_cache();
 }
 
-
-template <NativeType type>
-void NativesCollection<type>::UpdateSourceCache(Heap* heap) {
-  for (int i = 0; i < GetBuiltinsCount(); i++) {
-    Object* source = GetSourceCache(heap)->get(i);
-    if (!source->IsUndefined()) {
-      ExternalOneByteString::cast(source)->update_data_cache();
-    }
-  }
-}
-
-
-// Explicit template instantiations.
-template void NativesCollection<CORE>::UpdateSourceCache(Heap* heap);
-template void NativesCollection<EXPERIMENTAL>::UpdateSourceCache(Heap* heap);
-template void NativesCollection<EXTRAS>::UpdateSourceCache(Heap* heap);
-template void NativesCollection<EXPERIMENTAL_EXTRAS>::UpdateSourceCache(
-    Heap* heap);
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/snapshot/natives.h b/src/snapshot/natives.h
index 07f6b1a..e447515 100644
--- a/src/snapshot/natives.h
+++ b/src/snapshot/natives.h
@@ -44,7 +44,6 @@
   // The following methods are implemented in natives-common.cc:
 
   static FixedArray* GetSourceCache(Heap* heap);
-  static void UpdateSourceCache(Heap* heap);
 };
 
 typedef NativesCollection<CORE> Natives;
diff --git a/src/snapshot/partial-serializer.cc b/src/snapshot/partial-serializer.cc
index 34defb4..b46f675 100644
--- a/src/snapshot/partial-serializer.cc
+++ b/src/snapshot/partial-serializer.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/snapshot/partial-serializer.h"
+#include "src/snapshot/startup-serializer.h"
 
 #include "src/objects-inl.h"
 
@@ -10,11 +11,8 @@
 namespace internal {
 
 PartialSerializer::PartialSerializer(Isolate* isolate,
-                                     Serializer* startup_snapshot_serializer,
-                                     SnapshotByteSink* sink)
-    : Serializer(isolate, sink),
-      startup_serializer_(startup_snapshot_serializer),
-      next_partial_cache_index_(0) {
+                                     StartupSerializer* startup_serializer)
+    : Serializer(isolate), startup_serializer_(startup_serializer) {
   InitializeCodeAddressMap();
 }
 
@@ -34,7 +32,7 @@
     if (context->IsNativeContext()) {
       context->set(Context::NEXT_CONTEXT_LINK,
                    isolate_->heap()->undefined_value());
-      DCHECK(!context->global_object()->IsUndefined());
+      DCHECK(!context->global_object()->IsUndefined(context->GetIsolate()));
     }
   }
   VisitPointer(o);
@@ -53,19 +51,23 @@
   // Replace typed arrays by undefined.
   if (obj->IsJSTypedArray()) obj = isolate_->heap()->undefined_value();
 
+  if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
+
   int root_index = root_index_map_.Lookup(obj);
   if (root_index != RootIndexMap::kInvalidRootIndex) {
     PutRoot(root_index, obj, how_to_code, where_to_point, skip);
     return;
   }
 
+  if (SerializeBackReference(obj, how_to_code, where_to_point, skip)) return;
+
   if (ShouldBeInThePartialSnapshotCache(obj)) {
     FlushSkip(skip);
 
-    int cache_index = PartialSnapshotCacheIndex(obj);
-    sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
-               "PartialSnapshotCache");
-    sink_->PutInt(cache_index, "partial_snapshot_cache_index");
+    int cache_index = startup_serializer_->PartialSnapshotCacheIndex(obj);
+    sink_.Put(kPartialSnapshotCache + how_to_code + where_to_point,
+              "PartialSnapshotCache");
+    sink_.PutInt(cache_index, "partial_snapshot_cache_index");
     return;
   }
 
@@ -76,35 +78,26 @@
   // All the internalized strings that the partial snapshot needs should be
   // either in the root table or in the partial snapshot cache.
   DCHECK(!obj->IsInternalizedString());
-
-  if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
+  // Function and object templates are not context specific.
+  DCHECK(!obj->IsTemplateInfo());
 
   FlushSkip(skip);
 
   // Clear literal boilerplates.
   if (obj->IsJSFunction()) {
-    FixedArray* literals = JSFunction::cast(obj)->literals();
-    for (int i = 0; i < literals->length(); i++) literals->set_undefined(i);
+    JSFunction* function = JSFunction::cast(obj);
+    LiteralsArray* literals = function->literals();
+    for (int i = 0; i < literals->literals_count(); i++) {
+      literals->set_literal_undefined(i);
+    }
+    function->ClearTypeFeedbackInfo();
   }
 
   // Object has not yet been serialized.  Serialize it here.
-  ObjectSerializer serializer(this, obj, sink_, how_to_code, where_to_point);
+  ObjectSerializer serializer(this, obj, &sink_, how_to_code, where_to_point);
   serializer.Serialize();
 }
 
-int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
-  int index = partial_cache_index_map_.LookupOrInsert(
-      heap_object, next_partial_cache_index_);
-  if (index == PartialCacheIndexMap::kInvalidIndex) {
-    // This object is not part of the partial snapshot cache yet. Add it to the
-    // startup snapshot so we can refer to it via partial snapshot index from
-    // the partial snapshot.
-    startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object));
-    return next_partial_cache_index_++;
-  }
-  return index;
-}
-
 bool PartialSerializer::ShouldBeInThePartialSnapshotCache(HeapObject* o) {
   // Scripts should be referred only through shared function infos.  We can't
   // allow them to be part of the partial snapshot because they contain a
diff --git a/src/snapshot/partial-serializer.h b/src/snapshot/partial-serializer.h
index ddaba5f..282f76e 100644
--- a/src/snapshot/partial-serializer.h
+++ b/src/snapshot/partial-serializer.h
@@ -11,10 +11,11 @@
 namespace v8 {
 namespace internal {
 
+class StartupSerializer;
+
 class PartialSerializer : public Serializer {
  public:
-  PartialSerializer(Isolate* isolate, Serializer* startup_snapshot_serializer,
-                    SnapshotByteSink* sink);
+  PartialSerializer(Isolate* isolate, StartupSerializer* startup_serializer);
 
   ~PartialSerializer() override;
 
@@ -22,36 +23,12 @@
   void Serialize(Object** o);
 
  private:
-  class PartialCacheIndexMap : public AddressMapBase {
-   public:
-    PartialCacheIndexMap() : map_(HashMap::PointersMatch) {}
-
-    static const int kInvalidIndex = -1;
-
-    // Lookup object in the map. Return its index if found, or create
-    // a new entry with new_index as value, and return kInvalidIndex.
-    int LookupOrInsert(HeapObject* obj, int new_index) {
-      HashMap::Entry* entry = LookupEntry(&map_, obj, false);
-      if (entry != NULL) return GetValue(entry);
-      SetValue(LookupEntry(&map_, obj, true), static_cast<uint32_t>(new_index));
-      return kInvalidIndex;
-    }
-
-   private:
-    HashMap map_;
-
-    DISALLOW_COPY_AND_ASSIGN(PartialCacheIndexMap);
-  };
-
   void SerializeObject(HeapObject* o, HowToCode how_to_code,
                        WhereToPoint where_to_point, int skip) override;
 
-  int PartialSnapshotCacheIndex(HeapObject* o);
   bool ShouldBeInThePartialSnapshotCache(HeapObject* o);
 
-  Serializer* startup_serializer_;
-  PartialCacheIndexMap partial_cache_index_map_;
-  int next_partial_cache_index_;
+  StartupSerializer* startup_serializer_;
   DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
 };
 
diff --git a/src/snapshot/serializer-common.cc b/src/snapshot/serializer-common.cc
index 4afaa20..41c68e8 100644
--- a/src/snapshot/serializer-common.cc
+++ b/src/snapshot/serializer-common.cc
@@ -14,7 +14,7 @@
 ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) {
   map_ = isolate->external_reference_map();
   if (map_ != NULL) return;
-  map_ = new HashMap(HashMap::PointersMatch);
+  map_ = new base::HashMap(base::HashMap::PointersMatch);
   ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate);
   for (int i = 0; i < table->size(); ++i) {
     Address addr = table->address(i);
@@ -31,16 +31,16 @@
 
 uint32_t ExternalReferenceEncoder::Encode(Address address) const {
   DCHECK_NOT_NULL(address);
-  HashMap::Entry* entry =
-      const_cast<HashMap*>(map_)->Lookup(address, Hash(address));
+  base::HashMap::Entry* entry =
+      const_cast<base::HashMap*>(map_)->Lookup(address, Hash(address));
   DCHECK_NOT_NULL(entry);
   return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
 }
 
 const char* ExternalReferenceEncoder::NameOfAddress(Isolate* isolate,
                                                     Address address) const {
-  HashMap::Entry* entry =
-      const_cast<HashMap*>(map_)->Lookup(address, Hash(address));
+  base::HashMap::Entry* entry =
+      const_cast<base::HashMap*>(map_)->Lookup(address, Hash(address));
   if (entry == NULL) return "<unknown>";
   uint32_t i = static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
   return ExternalReferenceTable::instance(isolate)->name(i);
@@ -67,7 +67,7 @@
     // During deserialization, the visitor populates the partial snapshot cache
     // and eventually terminates the cache with undefined.
     visitor->VisitPointer(&cache->at(i));
-    if (cache->at(i)->IsUndefined()) break;
+    if (cache->at(i)->IsUndefined(isolate)) break;
   }
 }
 
diff --git a/src/snapshot/serializer-common.h b/src/snapshot/serializer-common.h
index 1ce5ced..bdd2b51 100644
--- a/src/snapshot/serializer-common.h
+++ b/src/snapshot/serializer-common.h
@@ -28,7 +28,7 @@
                                  kPointerSizeLog2);
   }
 
-  HashMap* map_;
+  base::HashMap* map_;
 
   DISALLOW_COPY_AND_ASSIGN(ExternalReferenceEncoder);
 };
@@ -94,31 +94,26 @@
   STATIC_ASSERT(5 == kNumberOfSpaces);
   enum Where {
     // 0x00..0x04  Allocate new object, in specified space.
-    kNewObject = 0,
-    // 0x05        Unused (including 0x25, 0x45, 0x65).
-    // 0x06        Unused (including 0x26, 0x46, 0x66).
-    // 0x07        Unused (including 0x27, 0x47, 0x67).
+    kNewObject = 0x00,
     // 0x08..0x0c  Reference to previous object from space.
     kBackref = 0x08,
-    // 0x0d        Unused (including 0x2d, 0x4d, 0x6d).
-    // 0x0e        Unused (including 0x2e, 0x4e, 0x6e).
-    // 0x0f        Unused (including 0x2f, 0x4f, 0x6f).
     // 0x10..0x14  Reference to previous object from space after skip.
     kBackrefWithSkip = 0x10,
-    // 0x15        Unused (including 0x35, 0x55, 0x75).
-    // 0x16        Unused (including 0x36, 0x56, 0x76).
-    // 0x17        Misc (including 0x37, 0x57, 0x77).
-    // 0x18        Root array item.
-    kRootArray = 0x18,
-    // 0x19        Object in the partial snapshot cache.
-    kPartialSnapshotCache = 0x19,
-    // 0x1a        External reference referenced by id.
-    kExternalReference = 0x1a,
-    // 0x1b        Object provided in the attached list.
-    kAttachedReference = 0x1b,
-    // 0x1c        Builtin code referenced by index.
-    kBuiltin = 0x1c
-    // 0x1d..0x1f  Misc (including 0x3d..0x3f, 0x5d..0x5f, 0x7d..0x7f)
+
+    // 0x05       Root array item.
+    kRootArray = 0x05,
+    // 0x06        Object in the partial snapshot cache.
+    kPartialSnapshotCache = 0x06,
+    // 0x07        External reference referenced by id.
+    kExternalReference = 0x07,
+
+    // 0x0d        Object provided in the attached list.
+    kAttachedReference = 0x0d,
+    // 0x0e        Builtin code referenced by index.
+    kBuiltin = 0x0e,
+
+    // 0x0f        Misc, see below (incl. 0x2f, 0x4f, 0x6f).
+    // 0x15..0x1f  Misc, see below (incl. 0x35..0x3f, 0x55..0x5f, 0x75..0x7f).
   };
 
   static const int kWhereMask = 0x1f;
@@ -147,36 +142,45 @@
 
   // ---------- Misc ----------
   // Skip.
-  static const int kSkip = 0x1d;
-  // Internal reference encoded as offsets of pc and target from code entry.
-  static const int kInternalReference = 0x1e;
-  static const int kInternalReferenceEncoded = 0x1f;
+  static const int kSkip = 0x0f;
   // Do nothing, used for padding.
-  static const int kNop = 0x3d;
+  static const int kNop = 0x2f;
   // Move to next reserved chunk.
-  static const int kNextChunk = 0x3e;
+  static const int kNextChunk = 0x4f;
   // Deferring object content.
-  static const int kDeferred = 0x3f;
-  // Used for the source code of the natives, which is in the executable, but
-  // is referred to from external strings in the snapshot.
-  static const int kNativesStringResource = 0x5d;
-  // Used for the source code for compiled stubs, which is in the executable,
-  // but is referred to from external strings in the snapshot.
-  static const int kExtraNativesStringResource = 0x5e;
+  static const int kDeferred = 0x6f;
+  // Alignment prefixes 0x15..0x17
+  static const int kAlignmentPrefix = 0x15;
   // A tag emitted at strategic points in the snapshot to delineate sections.
   // If the deserializer does not find these at the expected moments then it
   // is an indication that the snapshot and the VM do not fit together.
   // Examine the build process for architecture, version or configuration
   // mismatches.
-  static const int kSynchronize = 0x17;
+  static const int kSynchronize = 0x18;
   // Repeats of variable length.
-  static const int kVariableRepeat = 0x37;
+  static const int kVariableRepeat = 0x19;
   // Raw data of variable length.
-  static const int kVariableRawData = 0x57;
-  // Alignment prefixes 0x7d..0x7f
-  static const int kAlignmentPrefix = 0x7d;
+  static const int kVariableRawData = 0x1a;
+  // Internal reference encoded as offsets of pc and target from code entry.
+  static const int kInternalReference = 0x1b;
+  static const int kInternalReferenceEncoded = 0x1c;
+  // Used for the source code of the natives, which is in the executable, but
+  // is referred to from external strings in the snapshot.
+  static const int kNativesStringResource = 0x1d;
+  // Used for the source code for compiled stubs, which is in the executable,
+  // but is referred to from external strings in the snapshot.
+  static const int kExtraNativesStringResource = 0x1e;
 
-  // 0x77 unused
+  // 8 hot (recently seen or back-referenced) objects with optional skip.
+  static const int kNumberOfHotObjects = 8;
+  STATIC_ASSERT(kNumberOfHotObjects == HotObjectsList::kSize);
+  // 0x38..0x3f
+  static const int kHotObject = 0x38;
+  // 0x58..0x5f
+  static const int kHotObjectWithSkip = 0x58;
+  static const int kHotObjectMask = 0x07;
+
+  // 0x1f, 0x35..0x37, 0x55..0x57, 0x75..0x7f unused.
 
   // ---------- byte code range 0x80..0xff ----------
   // First 32 root array items.
@@ -187,27 +191,21 @@
   static const int kRootArrayConstantsWithSkip = 0xa0;
   static const int kRootArrayConstantsMask = 0x1f;
 
-  // 8 hot (recently seen or back-referenced) objects with optional skip.
-  static const int kNumberOfHotObjects = 0x08;
-  // 0xc0..0xc7
-  static const int kHotObject = 0xc0;
-  // 0xc8..0xcf
-  static const int kHotObjectWithSkip = 0xc8;
-  static const int kHotObjectMask = 0x07;
-
   // 32 common raw data lengths.
   static const int kNumberOfFixedRawData = 0x20;
-  // 0xd0..0xef
-  static const int kFixedRawData = 0xd0;
+  // 0xc0..0xdf
+  static const int kFixedRawData = 0xc0;
   static const int kOnePointerRawData = kFixedRawData;
   static const int kFixedRawDataStart = kFixedRawData - 1;
 
   // 16 repeats lengths.
   static const int kNumberOfFixedRepeat = 0x10;
-  // 0xf0..0xff
-  static const int kFixedRepeat = 0xf0;
+  // 0xe0..0xef
+  static const int kFixedRepeat = 0xe0;
   static const int kFixedRepeatStart = kFixedRepeat - 1;
 
+  // 0xf0..0xff unused.
+
   // ---------- special values ----------
   static const int kAnyOldSpace = -1;
 
diff --git a/src/snapshot/serializer.cc b/src/snapshot/serializer.cc
index f6f2200..b6a75ff 100644
--- a/src/snapshot/serializer.cc
+++ b/src/snapshot/serializer.cc
@@ -10,9 +10,8 @@
 namespace v8 {
 namespace internal {
 
-Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
+Serializer::Serializer(Isolate* isolate)
     : isolate_(isolate),
-      sink_(sink),
       external_reference_encoder_(isolate),
       root_index_map_(isolate),
       recursion_depth_(0),
@@ -90,10 +89,10 @@
 void Serializer::SerializeDeferredObjects() {
   while (deferred_objects_.length() > 0) {
     HeapObject* obj = deferred_objects_.RemoveLast();
-    ObjectSerializer obj_serializer(this, obj, sink_, kPlain, kStartOfObject);
+    ObjectSerializer obj_serializer(this, obj, &sink_, kPlain, kStartOfObject);
     obj_serializer.SerializeDeferred();
   }
-  sink_->Put(kSynchronize, "Finished with deferred objects");
+  sink_.Put(kSynchronize, "Finished with deferred objects");
 }
 
 void Serializer::VisitPointers(Object** start, Object** end) {
@@ -141,62 +140,61 @@
 }
 #endif  // DEBUG
 
-bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
-                                      WhereToPoint where_to_point, int skip) {
-  if (how_to_code == kPlain && where_to_point == kStartOfObject) {
-    // Encode a reference to a hot object by its index in the working set.
-    int index = hot_objects_.Find(obj);
-    if (index != HotObjectsList::kNotFound) {
-      DCHECK(index >= 0 && index < kNumberOfHotObjects);
-      if (FLAG_trace_serializer) {
-        PrintF(" Encoding hot object %d:", index);
-        obj->ShortPrint();
-        PrintF("\n");
-      }
-      if (skip != 0) {
-        sink_->Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
-        sink_->PutInt(skip, "HotObjectSkipDistance");
-      } else {
-        sink_->Put(kHotObject + index, "HotObject");
-      }
-      return true;
-    }
+bool Serializer::SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
+                                    WhereToPoint where_to_point, int skip) {
+  if (how_to_code != kPlain || where_to_point != kStartOfObject) return false;
+  // Encode a reference to a hot object by its index in the working set.
+  int index = hot_objects_.Find(obj);
+  if (index == HotObjectsList::kNotFound) return false;
+  DCHECK(index >= 0 && index < kNumberOfHotObjects);
+  if (FLAG_trace_serializer) {
+    PrintF(" Encoding hot object %d:", index);
+    obj->ShortPrint();
+    PrintF("\n");
   }
+  if (skip != 0) {
+    sink_.Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
+    sink_.PutInt(skip, "HotObjectSkipDistance");
+  } else {
+    sink_.Put(kHotObject + index, "HotObject");
+  }
+  return true;
+}
+bool Serializer::SerializeBackReference(HeapObject* obj, HowToCode how_to_code,
+                                        WhereToPoint where_to_point, int skip) {
   SerializerReference reference = reference_map_.Lookup(obj);
-  if (reference.is_valid()) {
-    // Encode the location of an already deserialized object in order to write
-    // its location into a later object.  We can encode the location as an
-    // offset fromthe start of the deserialized objects or as an offset
-    // backwards from thecurrent allocation pointer.
-    if (reference.is_attached_reference()) {
-      FlushSkip(skip);
-      if (FLAG_trace_serializer) {
-        PrintF(" Encoding attached reference %d\n",
-               reference.attached_reference_index());
-      }
-      PutAttachedReference(reference, how_to_code, where_to_point);
-    } else {
-      DCHECK(reference.is_back_reference());
-      if (FLAG_trace_serializer) {
-        PrintF(" Encoding back reference to: ");
-        obj->ShortPrint();
-        PrintF("\n");
-      }
-
-      PutAlignmentPrefix(obj);
-      AllocationSpace space = reference.space();
-      if (skip == 0) {
-        sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRef");
-      } else {
-        sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space,
-                   "BackRefWithSkip");
-        sink_->PutInt(skip, "BackRefSkipDistance");
-      }
-      PutBackReference(obj, reference);
+  if (!reference.is_valid()) return false;
+  // Encode the location of an already deserialized object in order to write
+  // its location into a later object.  We can encode the location as an
+  // offset from the start of the deserialized objects or as an offset
+  // backwards from the current allocation pointer.
+  if (reference.is_attached_reference()) {
+    FlushSkip(skip);
+    if (FLAG_trace_serializer) {
+      PrintF(" Encoding attached reference %d\n",
+             reference.attached_reference_index());
     }
-    return true;
+    PutAttachedReference(reference, how_to_code, where_to_point);
+  } else {
+    DCHECK(reference.is_back_reference());
+    if (FLAG_trace_serializer) {
+      PrintF(" Encoding back reference to: ");
+      obj->ShortPrint();
+      PrintF("\n");
+    }
+
+    PutAlignmentPrefix(obj);
+    AllocationSpace space = reference.space();
+    if (skip == 0) {
+      sink_.Put(kBackref + how_to_code + where_to_point + space, "BackRef");
+    } else {
+      sink_.Put(kBackrefWithSkip + how_to_code + where_to_point + space,
+                "BackRefWithSkip");
+      sink_.PutInt(skip, "BackRefSkipDistance");
+    }
+    PutBackReference(obj, reference);
   }
-  return false;
+  return true;
 }
 
 void Serializer::PutRoot(int root_index, HeapObject* object,
@@ -213,28 +211,29 @@
       root_index < kNumberOfRootArrayConstants &&
       !isolate()->heap()->InNewSpace(object)) {
     if (skip == 0) {
-      sink_->Put(kRootArrayConstants + root_index, "RootConstant");
+      sink_.Put(kRootArrayConstants + root_index, "RootConstant");
     } else {
-      sink_->Put(kRootArrayConstantsWithSkip + root_index, "RootConstant");
-      sink_->PutInt(skip, "SkipInPutRoot");
+      sink_.Put(kRootArrayConstantsWithSkip + root_index, "RootConstant");
+      sink_.PutInt(skip, "SkipInPutRoot");
     }
   } else {
     FlushSkip(skip);
-    sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
-    sink_->PutInt(root_index, "root_index");
+    sink_.Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
+    sink_.PutInt(root_index, "root_index");
+    hot_objects_.Add(object);
   }
 }
 
 void Serializer::PutSmi(Smi* smi) {
-  sink_->Put(kOnePointerRawData, "Smi");
+  sink_.Put(kOnePointerRawData, "Smi");
   byte* bytes = reinterpret_cast<byte*>(&smi);
-  for (int i = 0; i < kPointerSize; i++) sink_->Put(bytes[i], "Byte");
+  for (int i = 0; i < kPointerSize; i++) sink_.Put(bytes[i], "Byte");
 }
 
 void Serializer::PutBackReference(HeapObject* object,
                                   SerializerReference reference) {
   DCHECK(BackReferenceIsAlreadyAllocated(reference));
-  sink_->PutInt(reference.back_reference(), "BackRefValue");
+  sink_.PutInt(reference.back_reference(), "BackRefValue");
   hot_objects_.Add(object);
 }
 
@@ -245,8 +244,8 @@
   DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
          (how_to_code == kPlain && where_to_point == kInnerPointer) ||
          (how_to_code == kFromCode && where_to_point == kInnerPointer));
-  sink_->Put(kAttachedReference + how_to_code + where_to_point, "AttachedRef");
-  sink_->PutInt(reference.attached_reference_index(), "AttachedRefIndex");
+  sink_.Put(kAttachedReference + how_to_code + where_to_point, "AttachedRef");
+  sink_.PutInt(reference.attached_reference_index(), "AttachedRefIndex");
 }
 
 int Serializer::PutAlignmentPrefix(HeapObject* object) {
@@ -254,7 +253,7 @@
   if (alignment != kWordAligned) {
     DCHECK(1 <= alignment && alignment <= 3);
     byte prefix = (kAlignmentPrefix - 1) + alignment;
-    sink_->Put(prefix, "Alignment");
+    sink_.Put(prefix, "Alignment");
     return Heap::GetMaximumFillToAlign(alignment);
   }
   return 0;
@@ -274,8 +273,8 @@
   if (new_chunk_size > max_chunk_size(space)) {
     // The new chunk size would not fit onto a single page. Complete the
     // current chunk and start a new one.
-    sink_->Put(kNextChunk, "NextChunk");
-    sink_->Put(space, "NextChunkSpace");
+    sink_.Put(kNextChunk, "NextChunk");
+    sink_.Put(space, "NextChunkSpace");
     completed_chunks_[space].Add(pending_chunk_[space]);
     pending_chunk_[space] = 0;
     new_chunk_size = size;
@@ -290,11 +289,11 @@
   // The non-branching GetInt will read up to 3 bytes too far, so we need
   // to pad the snapshot to make sure we don't read over the end.
   for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
-    sink_->Put(kNop, "Padding");
+    sink_.Put(kNop, "Padding");
   }
   // Pad up to pointer size for checksum.
-  while (!IsAligned(sink_->Position(), kPointerAlignment)) {
-    sink_->Put(kNop, "Padding");
+  while (!IsAligned(sink_.Position(), kPointerAlignment)) {
+    sink_.Put(kNop, "Padding");
   }
 }
 
@@ -668,9 +667,10 @@
     int builtin_count,
     v8::String::ExternalOneByteStringResource** resource_pointer,
     FixedArray* source_cache, int resource_index) {
+  Isolate* isolate = serializer_->isolate();
   for (int i = 0; i < builtin_count; i++) {
     Object* source = source_cache->get(i);
-    if (!source->IsUndefined()) {
+    if (!source->IsUndefined(isolate)) {
       ExternalOneByteString* string = ExternalOneByteString::cast(source);
       typedef v8::String::ExternalOneByteStringResource Resource;
       const Resource* resource = string->resource();
@@ -687,6 +687,9 @@
 
 void Serializer::ObjectSerializer::VisitExternalOneByteString(
     v8::String::ExternalOneByteStringResource** resource_pointer) {
+  DCHECK_EQ(serializer_->isolate()->heap()->native_source_string_map(),
+            object_->map());
+  DCHECK(ExternalOneByteString::cast(object_)->is_short());
   Address references_start = reinterpret_cast<Address>(resource_pointer);
   OutputRawData(references_start);
   if (SerializeExternalNativeSourceString(
@@ -707,25 +710,27 @@
 }
 
 Address Serializer::ObjectSerializer::PrepareCode() {
-  // To make snapshots reproducible, we make a copy of the code object
-  // and wipe all pointers in the copy, which we then serialize.
-  Code* original = Code::cast(object_);
-  Code* code = serializer_->CopyCode(original);
+  Code* code = Code::cast(object_);
+  if (FLAG_predictable) {
+    // To make snapshots reproducible, we make a copy of the code object
+    // and wipe all pointers in the copy, which we then serialize.
+    code = serializer_->CopyCode(code);
+    int mode_mask = RelocInfo::kCodeTargetMask |
+                    RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                    RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+                    RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+                    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+                    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+    for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
+      RelocInfo* rinfo = it.rinfo();
+      rinfo->WipeOut();
+    }
+    // We need to wipe out the header fields *after* wiping out the
+    // relocations, because some of these fields are needed for the latter.
+    code->WipeOutHeader();
+  }
   // Code age headers are not serializable.
   code->MakeYoung(serializer_->isolate());
-  int mode_mask = RelocInfo::kCodeTargetMask |
-                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
-                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
-                  RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
-  for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
-    RelocInfo* rinfo = it.rinfo();
-    rinfo->WipeOut();
-  }
-  // We need to wipe out the header fields *after* wiping out the
-  // relocations, because some of these fields are needed for the latter.
-  code->WipeOutHeader();
   return code->address();
 }
 
diff --git a/src/snapshot/serializer.h b/src/snapshot/serializer.h
index f99cd72..45f891e 100644
--- a/src/snapshot/serializer.h
+++ b/src/snapshot/serializer.h
@@ -38,28 +38,29 @@
  private:
   class NameMap {
    public:
-    NameMap() : impl_(HashMap::PointersMatch) {}
+    NameMap() : impl_(base::HashMap::PointersMatch) {}
 
     ~NameMap() {
-      for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
+      for (base::HashMap::Entry* p = impl_.Start(); p != NULL;
+           p = impl_.Next(p)) {
         DeleteArray(static_cast<const char*>(p->value));
       }
     }
 
     void Insert(Address code_address, const char* name, int name_size) {
-      HashMap::Entry* entry = FindOrCreateEntry(code_address);
+      base::HashMap::Entry* entry = FindOrCreateEntry(code_address);
       if (entry->value == NULL) {
         entry->value = CopyName(name, name_size);
       }
     }
 
     const char* Lookup(Address code_address) {
-      HashMap::Entry* entry = FindEntry(code_address);
+      base::HashMap::Entry* entry = FindEntry(code_address);
       return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
     }
 
     void Remove(Address code_address) {
-      HashMap::Entry* entry = FindEntry(code_address);
+      base::HashMap::Entry* entry = FindEntry(code_address);
       if (entry != NULL) {
         DeleteArray(static_cast<char*>(entry->value));
         RemoveEntry(entry);
@@ -68,11 +69,11 @@
 
     void Move(Address from, Address to) {
       if (from == to) return;
-      HashMap::Entry* from_entry = FindEntry(from);
+      base::HashMap::Entry* from_entry = FindEntry(from);
       DCHECK(from_entry != NULL);
       void* value = from_entry->value;
       RemoveEntry(from_entry);
-      HashMap::Entry* to_entry = FindOrCreateEntry(to);
+      base::HashMap::Entry* to_entry = FindOrCreateEntry(to);
       DCHECK(to_entry->value == NULL);
       to_entry->value = value;
     }
@@ -89,20 +90,20 @@
       return result;
     }
 
-    HashMap::Entry* FindOrCreateEntry(Address code_address) {
+    base::HashMap::Entry* FindOrCreateEntry(Address code_address) {
       return impl_.LookupOrInsert(code_address,
                                   ComputePointerHash(code_address));
     }
 
-    HashMap::Entry* FindEntry(Address code_address) {
+    base::HashMap::Entry* FindEntry(Address code_address) {
       return impl_.Lookup(code_address, ComputePointerHash(code_address));
     }
 
-    void RemoveEntry(HashMap::Entry* entry) {
+    void RemoveEntry(base::HashMap::Entry* entry) {
       impl_.Remove(entry->key, entry->hash);
     }
 
-    HashMap impl_;
+    base::HashMap impl_;
 
     DISALLOW_COPY_AND_ASSIGN(NameMap);
   };
@@ -119,7 +120,7 @@
 // There can be only one serializer per V8 process.
 class Serializer : public SerializerDeserializer {
  public:
-  Serializer(Isolate* isolate, SnapshotByteSink* sink);
+  explicit Serializer(Isolate* isolate);
   ~Serializer() override;
 
   void EncodeReservations(List<SerializedData::Reservation>* out) const;
@@ -170,14 +171,18 @@
   // Emit alignment prefix if necessary, return required padding space in bytes.
   int PutAlignmentPrefix(HeapObject* object);
 
-  // Returns true if the object was successfully serialized.
-  bool SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
-                            WhereToPoint where_to_point, int skip);
+  // Returns true if the object was successfully serialized as hot object.
+  bool SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
+                          WhereToPoint where_to_point, int skip);
+
+  // Returns true if the object was successfully serialized as back reference.
+  bool SerializeBackReference(HeapObject* obj, HowToCode how_to_code,
+                              WhereToPoint where_to_point, int skip);
 
   inline void FlushSkip(int skip) {
     if (skip != 0) {
-      sink_->Put(kSkip, "SkipFromSerializeObject");
-      sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
+      sink_.Put(kSkip, "SkipFromSerializeObject");
+      sink_.PutInt(skip, "SkipDistanceFromSerializeObject");
     }
   }
 
@@ -207,7 +212,7 @@
     return max_chunk_size_[space];
   }
 
-  SnapshotByteSink* sink() const { return sink_; }
+  const SnapshotByteSink* sink() const { return &sink_; }
 
   void QueueDeferredObject(HeapObject* obj) {
     DCHECK(reference_map_.Lookup(obj).is_back_reference());
@@ -218,7 +223,7 @@
 
   Isolate* isolate_;
 
-  SnapshotByteSink* sink_;
+  SnapshotByteSink sink_;
   ExternalReferenceEncoder external_reference_encoder_;
 
   SerializerReferenceMap reference_map_;
diff --git a/src/snapshot/snapshot-common.cc b/src/snapshot/snapshot-common.cc
index a951b0d..5eac4af 100644
--- a/src/snapshot/snapshot-common.cc
+++ b/src/snapshot/snapshot-common.cc
@@ -18,8 +18,7 @@
 
 #ifdef DEBUG
 bool Snapshot::SnapshotIsValid(v8::StartupData* snapshot_blob) {
-  return !Snapshot::ExtractStartupData(snapshot_blob).is_empty() &&
-         !Snapshot::ExtractContextData(snapshot_blob).is_empty();
+  return Snapshot::ExtractNumContexts(snapshot_blob) > 0;
 }
 #endif  // DEBUG
 
@@ -31,12 +30,6 @@
 }
 
 
-bool Snapshot::EmbedsScript(Isolate* isolate) {
-  if (!isolate->snapshot_available()) return false;
-  return ExtractMetadata(isolate->snapshot_blob()).embeds_script();
-}
-
-
 uint32_t Snapshot::SizeOfFirstPage(Isolate* isolate, AllocationSpace space) {
   DCHECK(space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE);
   if (!isolate->snapshot_available()) {
@@ -67,15 +60,16 @@
   return success;
 }
 
-
 MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
-    Isolate* isolate, Handle<JSGlobalProxy> global_proxy) {
+    Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
+    size_t context_index) {
   if (!isolate->snapshot_available()) return Handle<Context>();
   base::ElapsedTimer timer;
   if (FLAG_profile_deserialization) timer.Start();
 
   const v8::StartupData* blob = isolate->snapshot_blob();
-  Vector<const byte> context_data = ExtractContextData(blob);
+  Vector<const byte> context_data =
+      ExtractContextData(blob, static_cast<int>(context_index));
   SnapshotData snapshot_data(context_data);
   Deserializer deserializer(&snapshot_data);
 
@@ -87,178 +81,192 @@
   if (FLAG_profile_deserialization) {
     double ms = timer.Elapsed().InMillisecondsF();
     int bytes = context_data.length();
-    PrintF("[Deserializing context (%d bytes) took %0.3f ms]\n", bytes, ms);
+    PrintF("[Deserializing context #%zu (%d bytes) took %0.3f ms]\n",
+           context_index, bytes, ms);
   }
   return Handle<Context>::cast(result);
 }
 
+void UpdateMaxRequirementPerPage(
+    uint32_t* requirements,
+    Vector<const SerializedData::Reservation> reservations) {
+  int space = 0;
+  uint32_t current_requirement = 0;
+  for (const auto& reservation : reservations) {
+    current_requirement += reservation.chunk_size();
+    if (reservation.is_last()) {
+      requirements[space] = std::max(requirements[space], current_requirement);
+      current_requirement = 0;
+      space++;
+    }
+  }
+  DCHECK_EQ(i::Serializer::kNumberOfSpaces, space);
+}
 
-void CalculateFirstPageSizes(bool is_default_snapshot,
-                             const SnapshotData& startup_snapshot,
-                             const SnapshotData& context_snapshot,
+void CalculateFirstPageSizes(const SnapshotData* startup_snapshot,
+                             const List<SnapshotData*>* context_snapshots,
                              uint32_t* sizes_out) {
-  Vector<const SerializedData::Reservation> startup_reservations =
-      startup_snapshot.Reservations();
-  Vector<const SerializedData::Reservation> context_reservations =
-      context_snapshot.Reservations();
-  int startup_index = 0;
-  int context_index = 0;
-
   if (FLAG_profile_deserialization) {
     int startup_total = 0;
-    int context_total = 0;
-    for (auto& reservation : startup_reservations) {
+    PrintF("Deserialization will reserve:\n");
+    for (const auto& reservation : startup_snapshot->Reservations()) {
       startup_total += reservation.chunk_size();
     }
-    for (auto& reservation : context_reservations) {
-      context_total += reservation.chunk_size();
+    PrintF("%10d bytes per isolate\n", startup_total);
+    for (int i = 0; i < context_snapshots->length(); i++) {
+      int context_total = 0;
+      for (const auto& reservation : context_snapshots->at(i)->Reservations()) {
+        context_total += reservation.chunk_size();
+      }
+      PrintF("%10d bytes per context #%d\n", context_total, i);
     }
-    PrintF(
-        "Deserialization will reserve:\n"
-        "%10d bytes per isolate\n"
-        "%10d bytes per context\n",
-        startup_total, context_total);
+  }
+
+  uint32_t startup_requirements[i::Serializer::kNumberOfSpaces];
+  uint32_t context_requirements[i::Serializer::kNumberOfSpaces];
+  for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
+    startup_requirements[space] = 0;
+    context_requirements[space] = 0;
+  }
+
+  UpdateMaxRequirementPerPage(startup_requirements,
+                              startup_snapshot->Reservations());
+  for (const auto& context_snapshot : *context_snapshots) {
+    UpdateMaxRequirementPerPage(context_requirements,
+                                context_snapshot->Reservations());
   }
 
   for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
-    bool single_chunk = true;
-    while (!startup_reservations[startup_index].is_last()) {
-      single_chunk = false;
-      startup_index++;
-    }
-    while (!context_reservations[context_index].is_last()) {
-      single_chunk = false;
-      context_index++;
-    }
-
-    uint32_t required = kMaxUInt32;
-    if (single_chunk) {
-      // If both the startup snapshot data and the context snapshot data on
-      // this space fit in a single page, then we consider limiting the size
-      // of the first page. For this, we add the chunk sizes and some extra
-      // allowance. This way we achieve a smaller startup memory footprint.
-      required = (startup_reservations[startup_index].chunk_size() +
-                  2 * context_reservations[context_index].chunk_size()) +
-                 Page::kObjectStartOffset;
-      // Add a small allowance to the code space for small scripts.
-      if (space == CODE_SPACE) required += 32 * KB;
-    } else if (!FLAG_debug_code) {
-      // We expect the vanilla snapshot to only require one page per space,
-      // unless we are emitting debug code.
-      DCHECK(!is_default_snapshot);
-    }
+    // If the space requirement for a page is less than a page size, we consider
+    // limiting the size of the first page in order to save memory on startup.
+    uint32_t required = startup_requirements[space] +
+                        2 * context_requirements[space] +
+                        Page::kObjectStartOffset;
+    // Add a small allowance to the code space for small scripts.
+    if (space == CODE_SPACE) required += 32 * KB;
 
     if (space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE) {
       uint32_t max_size =
           MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(space));
-      sizes_out[space - FIRST_PAGED_SPACE] = Min(required, max_size);
-    } else {
-      DCHECK(single_chunk);
+      sizes_out[space - FIRST_PAGED_SPACE] = std::min(required, max_size);
     }
-    startup_index++;
-    context_index++;
   }
-
-  DCHECK_EQ(startup_reservations.length(), startup_index);
-  DCHECK_EQ(context_reservations.length(), context_index);
 }
 
-
 v8::StartupData Snapshot::CreateSnapshotBlob(
-    const i::StartupSerializer& startup_ser,
-    const i::PartialSerializer& context_ser, Snapshot::Metadata metadata) {
-  SnapshotData startup_snapshot(startup_ser);
-  SnapshotData context_snapshot(context_ser);
-  Vector<const byte> startup_data = startup_snapshot.RawData();
-  Vector<const byte> context_data = context_snapshot.RawData();
+    const SnapshotData* startup_snapshot,
+    const List<SnapshotData*>* context_snapshots) {
+  int num_contexts = context_snapshots->length();
+  int startup_snapshot_offset = StartupSnapshotOffset(num_contexts);
+  int total_length = startup_snapshot_offset;
+  total_length += startup_snapshot->RawData().length();
+  for (const auto& context_snapshot : *context_snapshots) {
+    total_length += context_snapshot->RawData().length();
+  }
 
   uint32_t first_page_sizes[kNumPagedSpaces];
+  CalculateFirstPageSizes(startup_snapshot, context_snapshots,
+                          first_page_sizes);
 
-  CalculateFirstPageSizes(!metadata.embeds_script(), startup_snapshot,
-                          context_snapshot, first_page_sizes);
-
-  int startup_length = startup_data.length();
-  int context_length = context_data.length();
-  int context_offset = ContextOffset(startup_length);
-
-  int length = context_offset + context_length;
-  char* data = new char[length];
-
-  memcpy(data + kMetadataOffset, &metadata.RawValue(), kInt32Size);
+  char* data = new char[total_length];
   memcpy(data + kFirstPageSizesOffset, first_page_sizes,
          kNumPagedSpaces * kInt32Size);
-  memcpy(data + kStartupLengthOffset, &startup_length, kInt32Size);
-  memcpy(data + kStartupDataOffset, startup_data.begin(), startup_length);
-  memcpy(data + context_offset, context_data.begin(), context_length);
-  v8::StartupData result = {data, length};
-
+  memcpy(data + kNumberOfContextsOffset, &num_contexts, kInt32Size);
+  int payload_offset = StartupSnapshotOffset(num_contexts);
+  int payload_length = startup_snapshot->RawData().length();
+  memcpy(data + payload_offset, startup_snapshot->RawData().start(),
+         payload_length);
   if (FLAG_profile_deserialization) {
-    PrintF(
-        "Snapshot blob consists of:\n"
-        "%10d bytes for startup\n"
-        "%10d bytes for context\n",
-        startup_length, context_length);
+    PrintF("Snapshot blob consists of:\n%10d bytes for startup\n",
+           payload_length);
   }
+  payload_offset += payload_length;
+  for (int i = 0; i < num_contexts; i++) {
+    memcpy(data + ContextSnapshotOffsetOffset(i), &payload_offset, kInt32Size);
+    SnapshotData* context_snapshot = context_snapshots->at(i);
+    payload_length = context_snapshot->RawData().length();
+    memcpy(data + payload_offset, context_snapshot->RawData().start(),
+           payload_length);
+    if (FLAG_profile_deserialization) {
+      PrintF("%10d bytes for context #%d\n", payload_length, i);
+    }
+    payload_offset += payload_length;
+  }
+
+  v8::StartupData result = {data, total_length};
   return result;
 }
 
-
-Snapshot::Metadata Snapshot::ExtractMetadata(const v8::StartupData* data) {
-  uint32_t raw;
-  memcpy(&raw, data->data + kMetadataOffset, kInt32Size);
-  return Metadata(raw);
+int Snapshot::ExtractNumContexts(const v8::StartupData* data) {
+  CHECK_LT(kNumberOfContextsOffset, data->raw_size);
+  int num_contexts;
+  memcpy(&num_contexts, data->data + kNumberOfContextsOffset, kInt32Size);
+  return num_contexts;
 }
 
-
 Vector<const byte> Snapshot::ExtractStartupData(const v8::StartupData* data) {
-  DCHECK_LT(kIntSize, data->raw_size);
-  int startup_length;
-  memcpy(&startup_length, data->data + kStartupLengthOffset, kInt32Size);
-  DCHECK_LT(startup_length, data->raw_size);
+  int num_contexts = ExtractNumContexts(data);
+  int startup_offset = StartupSnapshotOffset(num_contexts);
+  CHECK_LT(startup_offset, data->raw_size);
+  int first_context_offset;
+  memcpy(&first_context_offset, data->data + ContextSnapshotOffsetOffset(0),
+         kInt32Size);
+  CHECK_LT(first_context_offset, data->raw_size);
+  int startup_length = first_context_offset - startup_offset;
   const byte* startup_data =
-      reinterpret_cast<const byte*>(data->data + kStartupDataOffset);
+      reinterpret_cast<const byte*>(data->data + startup_offset);
   return Vector<const byte>(startup_data, startup_length);
 }
 
+Vector<const byte> Snapshot::ExtractContextData(const v8::StartupData* data,
+                                                int index) {
+  int num_contexts = ExtractNumContexts(data);
+  CHECK_LT(index, num_contexts);
 
-Vector<const byte> Snapshot::ExtractContextData(const v8::StartupData* data) {
-  DCHECK_LT(kIntSize, data->raw_size);
-  int startup_length;
-  memcpy(&startup_length, data->data + kStartupLengthOffset, kIntSize);
-  int context_offset = ContextOffset(startup_length);
+  int context_offset;
+  memcpy(&context_offset, data->data + ContextSnapshotOffsetOffset(index),
+         kInt32Size);
+  int next_context_offset;
+  if (index == num_contexts - 1) {
+    next_context_offset = data->raw_size;
+  } else {
+    memcpy(&next_context_offset,
+           data->data + ContextSnapshotOffsetOffset(index + 1), kInt32Size);
+    CHECK_LT(next_context_offset, data->raw_size);
+  }
+
   const byte* context_data =
       reinterpret_cast<const byte*>(data->data + context_offset);
-  DCHECK_LT(context_offset, data->raw_size);
-  int context_length = data->raw_size - context_offset;
+  int context_length = next_context_offset - context_offset;
   return Vector<const byte>(context_data, context_length);
 }
 
-SnapshotData::SnapshotData(const Serializer& ser) {
+SnapshotData::SnapshotData(const Serializer* serializer) {
   DisallowHeapAllocation no_gc;
   List<Reservation> reservations;
-  ser.EncodeReservations(&reservations);
-  const List<byte>& payload = ser.sink()->data();
+  serializer->EncodeReservations(&reservations);
+  const List<byte>* payload = serializer->sink()->data();
 
   // Calculate sizes.
   int reservation_size = reservations.length() * kInt32Size;
-  int size = kHeaderSize + reservation_size + payload.length();
+  int size = kHeaderSize + reservation_size + payload->length();
 
   // Allocate backing store and create result data.
   AllocateData(size);
 
   // Set header values.
-  SetMagicNumber(ser.isolate());
+  SetMagicNumber(serializer->isolate());
   SetHeaderValue(kCheckSumOffset, Version::Hash());
   SetHeaderValue(kNumReservationsOffset, reservations.length());
-  SetHeaderValue(kPayloadLengthOffset, payload.length());
+  SetHeaderValue(kPayloadLengthOffset, payload->length());
 
   // Copy reservation chunk sizes.
   CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
             reservation_size);
 
   // Copy serialized data.
-  CopyBytes(data_ + kHeaderSize + reservation_size, payload.begin(),
-            static_cast<size_t>(payload.length()));
+  CopyBytes(data_ + kHeaderSize + reservation_size, payload->begin(),
+            static_cast<size_t>(payload->length()));
 }
 
 bool SnapshotData::IsSane() {
diff --git a/src/snapshot/snapshot-source-sink.h b/src/snapshot/snapshot-source-sink.h
index 360ec76..5d4c08d 100644
--- a/src/snapshot/snapshot-source-sink.h
+++ b/src/snapshot/snapshot-source-sink.h
@@ -94,7 +94,7 @@
   void PutRaw(const byte* data, int number_of_bytes, const char* description);
   int Position() { return data_.length(); }
 
-  const List<byte>& data() const { return data_; }
+  const List<byte>* data() const { return &data_; }
 
  private:
   List<byte> data_;
diff --git a/src/snapshot/snapshot.h b/src/snapshot/snapshot.h
index c648d75..e332967 100644
--- a/src/snapshot/snapshot.h
+++ b/src/snapshot/snapshot.h
@@ -16,84 +16,11 @@
 class PartialSerializer;
 class StartupSerializer;
 
-class Snapshot : public AllStatic {
- public:
-  class Metadata {
-   public:
-    explicit Metadata(uint32_t data = 0) : data_(data) {}
-    bool embeds_script() { return EmbedsScriptBits::decode(data_); }
-    void set_embeds_script(bool v) {
-      data_ = EmbedsScriptBits::update(data_, v);
-    }
-
-    uint32_t& RawValue() { return data_; }
-
-   private:
-    class EmbedsScriptBits : public BitField<bool, 0, 1> {};
-    uint32_t data_;
-  };
-
-  // Initialize the Isolate from the internal snapshot. Returns false if no
-  // snapshot could be found.
-  static bool Initialize(Isolate* isolate);
-  // Create a new context using the internal partial snapshot.
-  static MaybeHandle<Context> NewContextFromSnapshot(
-      Isolate* isolate, Handle<JSGlobalProxy> global_proxy);
-
-  static bool HaveASnapshotToStartFrom(Isolate* isolate);
-
-  static bool EmbedsScript(Isolate* isolate);
-
-  static uint32_t SizeOfFirstPage(Isolate* isolate, AllocationSpace space);
-
-
-  // To be implemented by the snapshot source.
-  static const v8::StartupData* DefaultSnapshotBlob();
-
-  static v8::StartupData CreateSnapshotBlob(
-      const StartupSerializer& startup_ser,
-      const PartialSerializer& context_ser, Snapshot::Metadata metadata);
-
-#ifdef DEBUG
-  static bool SnapshotIsValid(v8::StartupData* snapshot_blob);
-#endif  // DEBUG
-
- private:
-  static Vector<const byte> ExtractStartupData(const v8::StartupData* data);
-  static Vector<const byte> ExtractContextData(const v8::StartupData* data);
-  static Metadata ExtractMetadata(const v8::StartupData* data);
-
-  // Snapshot blob layout:
-  // [0] metadata
-  // [1 - 6] pre-calculated first page sizes for paged spaces
-  // [7] serialized start up data length
-  // ... serialized start up data
-  // ... serialized context data
-
-  static const int kNumPagedSpaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
-
-  static const int kMetadataOffset = 0;
-  static const int kFirstPageSizesOffset = kMetadataOffset + kInt32Size;
-  static const int kStartupLengthOffset =
-      kFirstPageSizesOffset + kNumPagedSpaces * kInt32Size;
-  static const int kStartupDataOffset = kStartupLengthOffset + kInt32Size;
-
-  static int ContextOffset(int startup_length) {
-    return kStartupDataOffset + startup_length;
-  }
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
-};
-
-#ifdef V8_USE_EXTERNAL_STARTUP_DATA
-void SetSnapshotFromFile(StartupData* snapshot_blob);
-#endif
-
 // Wrapper around reservation sizes and the serialization payload.
 class SnapshotData : public SerializedData {
  public:
   // Used when producing.
-  explicit SnapshotData(const Serializer& ser);
+  explicit SnapshotData(const Serializer* serializer);
 
   // Used when consuming.
   explicit SnapshotData(const Vector<const byte> snapshot)
@@ -124,6 +51,74 @@
   static const int kHeaderSize = kPayloadLengthOffset + kInt32Size;
 };
 
+class Snapshot : public AllStatic {
+ public:
+  // Initialize the Isolate from the internal snapshot. Returns false if no
+  // snapshot could be found.
+  static bool Initialize(Isolate* isolate);
+  // Create a new context using the internal partial snapshot.
+  static MaybeHandle<Context> NewContextFromSnapshot(
+      Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
+      size_t context_index);
+
+  static bool HaveASnapshotToStartFrom(Isolate* isolate);
+
+  static bool EmbedsScript(Isolate* isolate);
+
+  static uint32_t SizeOfFirstPage(Isolate* isolate, AllocationSpace space);
+
+
+  // To be implemented by the snapshot source.
+  static const v8::StartupData* DefaultSnapshotBlob();
+
+  static v8::StartupData CreateSnapshotBlob(
+      const SnapshotData* startup_snapshot,
+      const List<SnapshotData*>* context_snapshots);
+
+#ifdef DEBUG
+  static bool SnapshotIsValid(v8::StartupData* snapshot_blob);
+#endif  // DEBUG
+
+ private:
+  static int ExtractNumContexts(const v8::StartupData* data);
+  static Vector<const byte> ExtractStartupData(const v8::StartupData* data);
+  static Vector<const byte> ExtractContextData(const v8::StartupData* data,
+                                               int index);
+
+  // Snapshot blob layout:
+  // [0 - 5] pre-calculated first page sizes for paged spaces
+  // [6] number of contexts N
+  // [7] offset to context 0
+  // [8] offset to context 1
+  // ...
+  // ... offset to context N - 1
+  // ... startup snapshot data
+  // ... context 0 snapshot data
+  // ... context 1 snapshot data
+
+  static const int kNumPagedSpaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
+
+  static const int kFirstPageSizesOffset = 0;
+  static const int kNumberOfContextsOffset =
+      kFirstPageSizesOffset + kNumPagedSpaces * kInt32Size;
+  static const int kFirstContextOffsetOffset =
+      kNumberOfContextsOffset + kInt32Size;
+
+  static int StartupSnapshotOffset(int num_contexts) {
+    return kFirstContextOffsetOffset + num_contexts * kInt32Size;
+  }
+
+  static int ContextSnapshotOffsetOffset(int index) {
+    return kFirstContextOffsetOffset + index * kInt32Size;
+  }
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
+};
+
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+void SetSnapshotFromFile(StartupData* snapshot_blob);
+#endif
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/snapshot/startup-serializer.cc b/src/snapshot/startup-serializer.cc
index c3f9b3e..80598e8 100644
--- a/src/snapshot/startup-serializer.cc
+++ b/src/snapshot/startup-serializer.cc
@@ -11,10 +11,11 @@
 namespace internal {
 
 StartupSerializer::StartupSerializer(
-    Isolate* isolate, SnapshotByteSink* sink,
-    FunctionCodeHandling function_code_handling)
-    : Serializer(isolate, sink),
-      function_code_handling_(function_code_handling),
+    Isolate* isolate,
+    v8::SnapshotCreator::FunctionCodeHandling function_code_handling)
+    : Serializer(isolate),
+      clear_function_code_(function_code_handling ==
+                           v8::SnapshotCreator::FunctionCodeHandling::kClear),
       serializing_builtins_(false) {
   InitializeCodeAddressMap();
 }
@@ -27,21 +28,21 @@
                                         WhereToPoint where_to_point, int skip) {
   DCHECK(!obj->IsJSFunction());
 
-  if (function_code_handling_ == CLEAR_FUNCTION_CODE) {
+  if (clear_function_code_) {
     if (obj->IsCode()) {
       Code* code = Code::cast(obj);
       // If the function code is compiled (either as native code or bytecode),
       // replace it with lazy-compile builtin. Only exception is when we are
       // serializing the canonical interpreter-entry-trampoline builtin.
       if (code->kind() == Code::FUNCTION ||
-          (!serializing_builtins_ && code->is_interpreter_entry_trampoline())) {
+          (!serializing_builtins_ &&
+           code->is_interpreter_trampoline_builtin())) {
         obj = isolate()->builtins()->builtin(Builtins::kCompileLazy);
       }
     } else if (obj->IsBytecodeArray()) {
       obj = isolate()->heap()->undefined_value();
     }
   } else if (obj->IsCode()) {
-    DCHECK_EQ(KEEP_FUNCTION_CODE, function_code_handling_);
     Code* code = Code::cast(obj);
     if (code->kind() == Code::FUNCTION) {
       code->ClearInlineCaches();
@@ -49,6 +50,8 @@
     }
   }
 
+  if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
+
   int root_index = root_index_map_.Lookup(obj);
   // We can only encode roots as such if it has already been serialized.
   // That applies to root indices below the wave front.
@@ -59,12 +62,12 @@
     }
   }
 
-  if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
+  if (SerializeBackReference(obj, how_to_code, where_to_point, skip)) return;
 
   FlushSkip(skip);
 
   // Object has not yet been serialized.  Serialize it here.
-  ObjectSerializer object_serializer(this, obj, sink_, how_to_code,
+  ObjectSerializer object_serializer(this, obj, &sink_, how_to_code,
                                      where_to_point);
   object_serializer.Serialize();
 
@@ -89,11 +92,22 @@
   Pad();
 }
 
+int StartupSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
+  int index;
+  if (!partial_cache_index_map_.LookupOrInsert(heap_object, &index)) {
+    // This object is not part of the partial snapshot cache yet. Add it to the
+    // startup snapshot so we can refer to it via partial snapshot index from
+    // the partial snapshot.
+    VisitPointer(reinterpret_cast<Object**>(&heap_object));
+  }
+  return index;
+}
+
 void StartupSerializer::Synchronize(VisitorSynchronization::SyncTag tag) {
   // We expect the builtins tag after builtins have been serialized.
   DCHECK(!serializing_builtins_ || tag == VisitorSynchronization::kBuiltins);
   serializing_builtins_ = (tag == VisitorSynchronization::kHandleScope);
-  sink_->Put(kSynchronize, "Synchronize");
+  sink_.Put(kSynchronize, "Synchronize");
 }
 
 void StartupSerializer::SerializeStrongReferences() {
diff --git a/src/snapshot/startup-serializer.h b/src/snapshot/startup-serializer.h
index 71b8475..cc66f71 100644
--- a/src/snapshot/startup-serializer.h
+++ b/src/snapshot/startup-serializer.h
@@ -6,6 +6,7 @@
 #define V8_SNAPSHOT_STARTUP_SERIALIZER_H_
 
 #include <bitset>
+#include "include/v8.h"
 #include "src/snapshot/serializer.h"
 
 namespace v8 {
@@ -13,11 +14,9 @@
 
 class StartupSerializer : public Serializer {
  public:
-  enum FunctionCodeHandling { CLEAR_FUNCTION_CODE, KEEP_FUNCTION_CODE };
-
   StartupSerializer(
-      Isolate* isolate, SnapshotByteSink* sink,
-      FunctionCodeHandling function_code_handling = CLEAR_FUNCTION_CODE);
+      Isolate* isolate,
+      v8::SnapshotCreator::FunctionCodeHandling function_code_handling);
   ~StartupSerializer() override;
 
   // Serialize the current state of the heap.  The order is:
@@ -28,7 +27,34 @@
   void SerializeStrongReferences();
   void SerializeWeakReferencesAndDeferred();
 
+  int PartialSnapshotCacheIndex(HeapObject* o);
+
  private:
+  class PartialCacheIndexMap : public AddressMapBase {
+   public:
+    PartialCacheIndexMap()
+        : map_(base::HashMap::PointersMatch), next_index_(0) {}
+
+    // Lookup object in the map. Return its index if found, or create
+    // a new entry with new_index as value, and return kInvalidIndex.
+    bool LookupOrInsert(HeapObject* obj, int* index_out) {
+      base::HashMap::Entry* entry = LookupEntry(&map_, obj, false);
+      if (entry != NULL) {
+        *index_out = GetValue(entry);
+        return true;
+      }
+      *index_out = next_index_;
+      SetValue(LookupEntry(&map_, obj, true), next_index_++);
+      return false;
+    }
+
+   private:
+    base::HashMap map_;
+    int next_index_;
+
+    DISALLOW_COPY_AND_ASSIGN(PartialCacheIndexMap);
+  };
+
   // The StartupSerializer has to serialize the root array, which is slightly
   // different.
   void VisitPointers(Object** start, Object** end) override;
@@ -42,10 +68,11 @@
   // roots. In the second pass, we serialize the rest.
   bool RootShouldBeSkipped(int root_index);
 
-  FunctionCodeHandling function_code_handling_;
+  bool clear_function_code_;
   bool serializing_builtins_;
   bool serializing_immortal_immovables_roots_;
   std::bitset<Heap::kStrongRootListLength> root_has_been_serialized_;
+  PartialCacheIndexMap partial_cache_index_map_;
   DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
 };
 
diff --git a/src/startup-data-util.cc b/src/startup-data-util.cc
index 4e0ad97..7c6d9eb 100644
--- a/src/startup-data-util.cc
+++ b/src/startup-data-util.cc
@@ -7,6 +7,7 @@
 #include <stdlib.h>
 #include <string.h>
 
+#include "src/base/file-utils.h"
 #include "src/base/logging.h"
 #include "src/base/platform/platform.h"
 #include "src/utils.h"
@@ -77,27 +78,6 @@
   atexit(&FreeStartupData);
 }
 
-
-char* RelativePath(char** buffer, const char* exec_path, const char* name) {
-  DCHECK(exec_path);
-  int path_separator = static_cast<int>(strlen(exec_path)) - 1;
-  while (path_separator >= 0 &&
-         !base::OS::isDirectorySeparator(exec_path[path_separator])) {
-    path_separator--;
-  }
-  if (path_separator >= 0) {
-    int name_length = static_cast<int>(strlen(name));
-    *buffer =
-        reinterpret_cast<char*>(calloc(path_separator + name_length + 2, 1));
-    *buffer[0] = '\0';
-    strncat(*buffer, exec_path, path_separator + 1);
-    strncat(*buffer, name, name_length);
-  } else {
-    *buffer = strdup(name);
-  }
-  return *buffer;
-}
-
 }  // namespace
 #endif  // V8_USE_EXTERNAL_STARTUP_DATA
 
diff --git a/src/string-builder.h b/src/string-builder.h
index 98bd82b..192603f 100644
--- a/src/string-builder.h
+++ b/src/string-builder.h
@@ -293,6 +293,14 @@
     }
   }
 
+  INLINE(void AppendCString(const uc16* s)) {
+    if (encoding_ == String::ONE_BYTE_ENCODING) {
+      while (*s != '\0') Append<uc16, uint8_t>(*(s++));
+    } else {
+      while (*s != '\0') Append<uc16, uc16>(*(s++));
+    }
+  }
+
   INLINE(bool CurrentPartCanFit(int length)) {
     return part_length_ - current_index_ > length;
   }
@@ -301,6 +309,8 @@
 
   MaybeHandle<String> Finish();
 
+  INLINE(bool HasOverflowed()) const { return overflowed_; }
+
   // Change encoding to two-byte.
   void ChangeEncoding() {
     DCHECK_EQ(String::ONE_BYTE_ENCODING, encoding_);
diff --git a/src/string-stream.cc b/src/string-stream.cc
index 02f6f1c..781f8cd 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -378,14 +378,14 @@
 
 
 void StringStream::PrintFixedArray(FixedArray* array, unsigned int limit) {
-  Heap* heap = array->GetHeap();
+  Isolate* isolate = array->GetIsolate();
   for (unsigned int i = 0; i < 10 && i < limit; i++) {
     Object* element = array->get(i);
-    if (element != heap->the_hole_value()) {
-      for (int len = 1; len < 18; len++)
-        Put(' ');
-      Add("%d: %o\n", i, array->get(i));
+    if (element->IsTheHole(isolate)) continue;
+    for (int len = 1; len < 18; len++) {
+      Put(' ');
     }
+    Add("%d: %o\n", i, array->get(i));
   }
   if (limit >= 10) {
     Add("                  ...\n");
@@ -527,19 +527,20 @@
   Object* name = fun->shared()->name();
   bool print_name = false;
   Isolate* isolate = fun->GetIsolate();
-  if (receiver->IsNull() || receiver->IsUndefined() || receiver->IsJSProxy()) {
+  if (receiver->IsNull(isolate) || receiver->IsUndefined(isolate) ||
+      receiver->IsTheHole(isolate) || receiver->IsJSProxy()) {
     print_name = true;
-  } else {
+  } else if (isolate->context() != nullptr) {
     if (!receiver->IsJSObject()) {
       receiver = receiver->GetRootMap(isolate)->prototype();
     }
 
     for (PrototypeIterator iter(isolate, JSObject::cast(receiver),
-                                PrototypeIterator::START_AT_RECEIVER);
+                                kStartAtReceiver);
          !iter.IsAtEnd(); iter.Advance()) {
       if (iter.GetCurrent()->IsJSProxy()) break;
       Object* key = iter.GetCurrent<JSObject>()->SlowReverseLookup(fun);
-      if (!key->IsUndefined()) {
+      if (!key->IsUndefined(isolate)) {
         if (!name->IsString() ||
             !key->IsString() ||
             !String::cast(name)->Equals(String::cast(key))) {
diff --git a/src/third_party/fdlibm/fdlibm.cc b/src/third_party/fdlibm/fdlibm.cc
deleted file mode 100644
index 0ef2301..0000000
--- a/src/third_party/fdlibm/fdlibm.cc
+++ /dev/null
@@ -1,228 +0,0 @@
-// The following is adapted from fdlibm (http://www.netlib.org/fdlibm).
-//
-// ====================================================
-// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
-//
-// Developed at SunSoft, a Sun Microsystems, Inc. business.
-// Permission to use, copy, modify, and distribute this
-// software is freely granted, provided that this notice
-// is preserved.
-// ====================================================
-//
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2014 the V8 project authors. All rights reserved.
-
-#include "src/third_party/fdlibm/fdlibm.h"
-
-#include <stdint.h>
-#include <cmath>
-#include <limits>
-
-#include "src/base/macros.h"
-#include "src/double.h"
-
-namespace v8 {
-namespace fdlibm {
-
-#ifdef _MSC_VER
-inline double scalbn(double x, int y) { return _scalb(x, y); }
-#endif  // _MSC_VER
-
-
-// Table of constants for 2/pi, 396 Hex digits (476 decimal) of 2/pi
-static const int two_over_pi[] = {
-    0xA2F983, 0x6E4E44, 0x1529FC, 0x2757D1, 0xF534DD, 0xC0DB62, 0x95993C,
-    0x439041, 0xFE5163, 0xABDEBB, 0xC561B7, 0x246E3A, 0x424DD2, 0xE00649,
-    0x2EEA09, 0xD1921C, 0xFE1DEB, 0x1CB129, 0xA73EE8, 0x8235F5, 0x2EBB44,
-    0x84E99C, 0x7026B4, 0x5F7E41, 0x3991D6, 0x398353, 0x39F49C, 0x845F8B,
-    0xBDF928, 0x3B1FF8, 0x97FFDE, 0x05980F, 0xEF2F11, 0x8B5A0A, 0x6D1F6D,
-    0x367ECF, 0x27CB09, 0xB74F46, 0x3F669E, 0x5FEA2D, 0x7527BA, 0xC7EBE5,
-    0xF17B3D, 0x0739F7, 0x8A5292, 0xEA6BFB, 0x5FB11F, 0x8D5D08, 0x560330,
-    0x46FC7B, 0x6BABF0, 0xCFBC20, 0x9AF436, 0x1DA9E3, 0x91615E, 0xE61B08,
-    0x659985, 0x5F14A0, 0x68408D, 0xFFD880, 0x4D7327, 0x310606, 0x1556CA,
-    0x73A8C9, 0x60E27B, 0xC08C6B};
-
-static const double zero = 0.0;
-static const double two24 = 1.6777216e+07;
-static const double one = 1.0;
-static const double twon24 = 5.9604644775390625e-08;
-
-static const double PIo2[] = {
-    1.57079625129699707031e+00,  // 0x3FF921FB, 0x40000000
-    7.54978941586159635335e-08,  // 0x3E74442D, 0x00000000
-    5.39030252995776476554e-15,  // 0x3CF84698, 0x80000000
-    3.28200341580791294123e-22,  // 0x3B78CC51, 0x60000000
-    1.27065575308067607349e-29,  // 0x39F01B83, 0x80000000
-    1.22933308981111328932e-36,  // 0x387A2520, 0x40000000
-    2.73370053816464559624e-44,  // 0x36E38222, 0x80000000
-    2.16741683877804819444e-51   // 0x3569F31D, 0x00000000
-};
-
-
-INLINE(int __kernel_rem_pio2(double* x, double* y, int e0, int nx)) {
-  static const int32_t jk = 3;
-  double fw;
-  int32_t jx = nx - 1;
-  int32_t jv = (e0 - 3) / 24;
-  if (jv < 0) jv = 0;
-  int32_t q0 = e0 - 24 * (jv + 1);
-  int32_t m = jx + jk;
-
-  double f[20];
-  for (int i = 0, j = jv - jx; i <= m; i++, j++) {
-    f[i] = (j < 0) ? zero : static_cast<double>(two_over_pi[j]);
-  }
-
-  double q[20];
-  for (int i = 0; i <= jk; i++) {
-    fw = 0.0;
-    for (int j = 0; j <= jx; j++) fw += x[j] * f[jx + i - j];
-    q[i] = fw;
-  }
-
-  int32_t jz = jk;
-
-recompute:
-
-  int32_t iq[20];
-  double z = q[jz];
-  for (int i = 0, j = jz; j > 0; i++, j--) {
-    fw = static_cast<double>(static_cast<int32_t>(twon24 * z));
-    iq[i] = static_cast<int32_t>(z - two24 * fw);
-    z = q[j - 1] + fw;
-  }
-
-  z = scalbn(z, q0);
-  z -= 8.0 * std::floor(z * 0.125);
-  int32_t n = static_cast<int32_t>(z);
-  z -= static_cast<double>(n);
-  int32_t ih = 0;
-  if (q0 > 0) {
-    int32_t i = (iq[jz - 1] >> (24 - q0));
-    n += i;
-    iq[jz - 1] -= i << (24 - q0);
-    ih = iq[jz - 1] >> (23 - q0);
-  } else if (q0 == 0) {
-    ih = iq[jz - 1] >> 23;
-  } else if (z >= 0.5) {
-    ih = 2;
-  }
-
-  if (ih > 0) {
-    n += 1;
-    int32_t carry = 0;
-    for (int i = 0; i < jz; i++) {
-      int32_t j = iq[i];
-      if (carry == 0) {
-        if (j != 0) {
-          carry = 1;
-          iq[i] = 0x1000000 - j;
-        }
-      } else {
-        iq[i] = 0xffffff - j;
-      }
-    }
-    if (q0 == 1) {
-      iq[jz - 1] &= 0x7fffff;
-    } else if (q0 == 2) {
-      iq[jz - 1] &= 0x3fffff;
-    }
-    if (ih == 2) {
-      z = one - z;
-      if (carry != 0) z -= scalbn(one, q0);
-    }
-  }
-
-  if (z == zero) {
-    int32_t j = 0;
-    for (int i = jz - 1; i >= jk; i--) j |= iq[i];
-    if (j == 0) {
-      int32_t k = 1;
-      while (iq[jk - k] == 0) k++;
-      for (int i = jz + 1; i <= jz + k; i++) {
-        f[jx + i] = static_cast<double>(two_over_pi[jv + i]);
-        for (j = 0, fw = 0.0; j <= jx; j++) fw += x[j] * f[jx + i - j];
-        q[i] = fw;
-      }
-      jz += k;
-      goto recompute;
-    }
-  }
-
-  if (z == 0.0) {
-    jz -= 1;
-    q0 -= 24;
-    while (iq[jz] == 0) {
-      jz--;
-      q0 -= 24;
-    }
-  } else {
-    z = scalbn(z, -q0);
-    if (z >= two24) {
-      fw = static_cast<double>(static_cast<int32_t>(twon24 * z));
-      iq[jz] = static_cast<int32_t>(z - two24 * fw);
-      jz += 1;
-      q0 += 24;
-      iq[jz] = static_cast<int32_t>(fw);
-    } else {
-      iq[jz] = static_cast<int32_t>(z);
-    }
-  }
-
-  fw = scalbn(one, q0);
-  for (int i = jz; i >= 0; i--) {
-    q[i] = fw * static_cast<double>(iq[i]);
-    fw *= twon24;
-  }
-
-  double fq[20];
-  for (int i = jz; i >= 0; i--) {
-    fw = 0.0;
-    for (int k = 0; k <= jk && k <= jz - i; k++) fw += PIo2[k] * q[i + k];
-    fq[jz - i] = fw;
-  }
-
-  fw = 0.0;
-  for (int i = jz; i >= 0; i--) fw += fq[i];
-  y[0] = (ih == 0) ? fw : -fw;
-  fw = fq[0] - fw;
-  for (int i = 1; i <= jz; i++) fw += fq[i];
-  y[1] = (ih == 0) ? fw : -fw;
-  return n & 7;
-}
-
-
-int rempio2(double x, double* y) {
-  int32_t hx = static_cast<int32_t>(internal::double_to_uint64(x) >> 32);
-  int32_t ix = hx & 0x7fffffff;
-
-  if (ix >= 0x7ff00000) {
-    *y = std::numeric_limits<double>::quiet_NaN();
-    return 0;
-  }
-
-  int32_t e0 = (ix >> 20) - 1046;
-  uint64_t zi = internal::double_to_uint64(x) & 0xFFFFFFFFu;
-  zi |= static_cast<uint64_t>(ix - (e0 << 20)) << 32;
-  double z = internal::uint64_to_double(zi);
-
-  double tx[3];
-  for (int i = 0; i < 2; i++) {
-    tx[i] = static_cast<double>(static_cast<int32_t>(z));
-    z = (z - tx[i]) * two24;
-  }
-  tx[2] = z;
-
-  int nx = 3;
-  while (tx[nx - 1] == zero) nx--;
-  int n = __kernel_rem_pio2(tx, y, e0, nx);
-  if (hx < 0) {
-    y[0] = -y[0];
-    y[1] = -y[1];
-    return -n;
-  }
-  return n;
-}
-}  // namespace internal
-}  // namespace v8
diff --git a/src/third_party/fdlibm/fdlibm.h b/src/third_party/fdlibm/fdlibm.h
deleted file mode 100644
index e417c8c..0000000
--- a/src/third_party/fdlibm/fdlibm.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// The following is adapted from fdlibm (http://www.netlib.org/fdlibm).
-//
-// ====================================================
-// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
-//
-// Developed at SunSoft, a Sun Microsystems, Inc. business.
-// Permission to use, copy, modify, and distribute this
-// software is freely granted, provided that this notice
-// is preserved.
-// ====================================================
-//
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2014 the V8 project authors. All rights reserved.
-
-#ifndef V8_FDLIBM_H_
-#define V8_FDLIBM_H_
-
-namespace v8 {
-namespace fdlibm {
-
-int rempio2(double x, double* y);
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_FDLIBM_H_
diff --git a/src/third_party/fdlibm/fdlibm.js b/src/third_party/fdlibm/fdlibm.js
index a5e789f..26ef126 100644
--- a/src/third_party/fdlibm/fdlibm.js
+++ b/src/third_party/fdlibm/fdlibm.js
@@ -16,9 +16,6 @@
 // The following is a straightforward translation of fdlibm routines
 // by Raymond Toy (rtoy@google.com).
 
-// rempio2result is used as a container for return values of %RemPiO2. It is
-// initialized to a two-element Float64Array during genesis.
-
 (function(global, utils) {
   
 "use strict";
@@ -28,735 +25,15 @@
 // -------------------------------------------------------------------
 // Imports
 
-var GlobalFloat64Array = global.Float64Array;
 var GlobalMath = global.Math;
 var MathAbs;
-var MathExp;
-var NaN = %GetRootNaN();
-var rempio2result;
+var MathExpm1;
 
 utils.Import(function(from) {
   MathAbs = from.MathAbs;
-  MathExp = from.MathExp;
+  MathExpm1 = from.MathExpm1;
 });
 
-utils.CreateDoubleResultArray = function(global) {
-  rempio2result = new GlobalFloat64Array(2);
-};
-
-// -------------------------------------------------------------------
-
-define INVPIO2 = 6.36619772367581382433e-01;
-define PIO2_1  = 1.57079632673412561417;
-define PIO2_1T = 6.07710050650619224932e-11;
-define PIO2_2  = 6.07710050630396597660e-11;
-define PIO2_2T = 2.02226624879595063154e-21;
-define PIO2_3  = 2.02226624871116645580e-21;
-define PIO2_3T = 8.47842766036889956997e-32;
-define PIO4    = 7.85398163397448278999e-01;
-define PIO4LO  = 3.06161699786838301793e-17;
-
-// Compute k and r such that x - k*pi/2 = r where |r| < pi/4. For
-// precision, r is returned as two values y0 and y1 such that r = y0 + y1
-// to more than double precision.
-
-macro REMPIO2(X)
-  var n, y0, y1;
-  var hx = %_DoubleHi(X);
-  var ix = hx & 0x7fffffff;
-
-  if (ix < 0x4002d97c) {
-    // |X| ~< 3*pi/4, special case with n = +/- 1
-    if (hx > 0) {
-      var z = X - PIO2_1;
-      if (ix != 0x3ff921fb) {
-        // 33+53 bit pi is good enough
-        y0 = z - PIO2_1T;
-        y1 = (z - y0) - PIO2_1T;
-      } else {
-        // near pi/2, use 33+33+53 bit pi
-        z -= PIO2_2;
-        y0 = z - PIO2_2T;
-        y1 = (z - y0) - PIO2_2T;
-      }
-      n = 1;
-    } else {
-      // Negative X
-      var z = X + PIO2_1;
-      if (ix != 0x3ff921fb) {
-        // 33+53 bit pi is good enough
-        y0 = z + PIO2_1T;
-        y1 = (z - y0) + PIO2_1T;
-      } else {
-        // near pi/2, use 33+33+53 bit pi
-        z += PIO2_2;
-        y0 = z + PIO2_2T;
-        y1 = (z - y0) + PIO2_2T;
-      }
-      n = -1;
-    }
-  } else if (ix <= 0x413921fb) {
-    // |X| ~<= 2^19*(pi/2), medium size
-    var t = MathAbs(X);
-    n = (t * INVPIO2 + 0.5) | 0;
-    var r = t - n * PIO2_1;
-    var w = n * PIO2_1T;
-    // First round good to 85 bit
-    y0 = r - w;
-    if (ix - (%_DoubleHi(y0) & 0x7ff00000) > 0x1000000) {
-      // 2nd iteration needed, good to 118
-      t = r;
-      w = n * PIO2_2;
-      r = t - w;
-      w = n * PIO2_2T - ((t - r) - w);
-      y0 = r - w;
-      if (ix - (%_DoubleHi(y0) & 0x7ff00000) > 0x3100000) {
-        // 3rd iteration needed. 151 bits accuracy
-        t = r;
-        w = n * PIO2_3;
-        r = t - w;
-        w = n * PIO2_3T - ((t - r) - w);
-        y0 = r - w;
-      }
-    }
-    y1 = (r - y0) - w;
-    if (hx < 0) {
-      n = -n;
-      y0 = -y0;
-      y1 = -y1;
-    }
-  } else {
-    // Need to do full Payne-Hanek reduction here.
-    n = %RemPiO2(X, rempio2result);
-    y0 = rempio2result[0];
-    y1 = rempio2result[1];
-  }
-endmacro
-
-
-// __kernel_sin(X, Y, IY)
-// kernel sin function on [-pi/4, pi/4], pi/4 ~ 0.7854
-// Input X is assumed to be bounded by ~pi/4 in magnitude.
-// Input Y is the tail of X so that x = X + Y.
-//
-// Algorithm
-//  1. Since ieee_sin(-x) = -ieee_sin(x), we need only to consider positive x.
-//  2. ieee_sin(x) is approximated by a polynomial of degree 13 on
-//     [0,pi/4]
-//                           3            13
-//          sin(x) ~ x + S1*x + ... + S6*x
-//     where
-//
-//    |ieee_sin(x)    2     4     6     8     10     12  |     -58
-//    |----- - (1+S1*x +S2*x +S3*x +S4*x +S5*x  +S6*x   )| <= 2
-//    |  x                                               |
-//
-//  3. ieee_sin(X+Y) = ieee_sin(X) + sin'(X')*Y
-//              ~ ieee_sin(X) + (1-X*X/2)*Y
-//     For better accuracy, let
-//               3      2      2      2      2
-//          r = X *(S2+X *(S3+X *(S4+X *(S5+X *S6))))
-//     then                   3    2
-//          sin(x) = X + (S1*X + (X *(r-Y/2)+Y))
-//
-define S1 = -1.66666666666666324348e-01;
-define S2 = 8.33333333332248946124e-03;
-define S3 = -1.98412698298579493134e-04;
-define S4 = 2.75573137070700676789e-06;
-define S5 = -2.50507602534068634195e-08;
-define S6 = 1.58969099521155010221e-10;
-
-macro RETURN_KERNELSIN(X, Y, SIGN)
-  var z = X * X;
-  var v = z * X;
-  var r = S2 + z * (S3 + z * (S4 + z * (S5 + z * S6)));
-  return (X - ((z * (0.5 * Y - v * r) - Y) - v * S1)) SIGN;
-endmacro
-
-// __kernel_cos(X, Y)
-// kernel cos function on [-pi/4, pi/4], pi/4 ~ 0.785398164
-// Input X is assumed to be bounded by ~pi/4 in magnitude.
-// Input Y is the tail of X so that x = X + Y.
-//
-// Algorithm
-//  1. Since ieee_cos(-x) = ieee_cos(x), we need only to consider positive x.
-//  2. ieee_cos(x) is approximated by a polynomial of degree 14 on
-//     [0,pi/4]
-//                                   4            14
-//          cos(x) ~ 1 - x*x/2 + C1*x + ... + C6*x
-//     where the remez error is
-//
-//  |                   2     4     6     8     10    12     14 |     -58
-//  |ieee_cos(x)-(1-.5*x +C1*x +C2*x +C3*x +C4*x +C5*x  +C6*x  )| <= 2
-//  |                                                           |
-//
-//                 4     6     8     10    12     14
-//  3. let r = C1*x +C2*x +C3*x +C4*x +C5*x  +C6*x  , then
-//         ieee_cos(x) = 1 - x*x/2 + r
-//     since ieee_cos(X+Y) ~ ieee_cos(X) - ieee_sin(X)*Y
-//                    ~ ieee_cos(X) - X*Y,
-//     a correction term is necessary in ieee_cos(x) and hence
-//         cos(X+Y) = 1 - (X*X/2 - (r - X*Y))
-//     For better accuracy when x > 0.3, let qx = |x|/4 with
-//     the last 32 bits mask off, and if x > 0.78125, let qx = 0.28125.
-//     Then
-//         cos(X+Y) = (1-qx) - ((X*X/2-qx) - (r-X*Y)).
-//     Note that 1-qx and (X*X/2-qx) is EXACT here, and the
-//     magnitude of the latter is at least a quarter of X*X/2,
-//     thus, reducing the rounding error in the subtraction.
-//
-define C1 = 4.16666666666666019037e-02;
-define C2 = -1.38888888888741095749e-03;
-define C3 = 2.48015872894767294178e-05;
-define C4 = -2.75573143513906633035e-07;
-define C5 = 2.08757232129817482790e-09;
-define C6 = -1.13596475577881948265e-11;
-
-macro RETURN_KERNELCOS(X, Y, SIGN)
-  var ix = %_DoubleHi(X) & 0x7fffffff;
-  var z = X * X;
-  var r = z * (C1 + z * (C2 + z * (C3 + z * (C4 + z * (C5 + z * C6)))));
-  if (ix < 0x3fd33333) {  // |x| ~< 0.3
-    return (1 - (0.5 * z - (z * r - X * Y))) SIGN;
-  } else {
-    var qx;
-    if (ix > 0x3fe90000) {  // |x| > 0.78125
-      qx = 0.28125;
-    } else {
-      qx = %_ConstructDouble(%_DoubleHi(0.25 * X), 0);
-    }
-    var hz = 0.5 * z - qx;
-    return (1 - qx - (hz - (z * r - X * Y))) SIGN;
-  }
-endmacro
-
-
-// kernel tan function on [-pi/4, pi/4], pi/4 ~ 0.7854
-// Input x is assumed to be bounded by ~pi/4 in magnitude.
-// Input y is the tail of x.
-// Input k indicates whether ieee_tan (if k = 1) or -1/tan (if k = -1)
-// is returned.
-//
-// Algorithm
-//  1. Since ieee_tan(-x) = -ieee_tan(x), we need only to consider positive x.
-//  2. if x < 2^-28 (hx<0x3e300000 0), return x with inexact if x!=0.
-//  3. ieee_tan(x) is approximated by a odd polynomial of degree 27 on
-//     [0,0.67434]
-//                           3             27
-//          tan(x) ~ x + T1*x + ... + T13*x
-//     where
-//
-//     |ieee_tan(x)    2     4            26   |     -59.2
-//     |----- - (1+T1*x +T2*x +.... +T13*x    )| <= 2
-//     |  x                                    |
-//
-//     Note: ieee_tan(x+y) = ieee_tan(x) + tan'(x)*y
-//                    ~ ieee_tan(x) + (1+x*x)*y
-//     Therefore, for better accuracy in computing ieee_tan(x+y), let
-//               3      2      2       2       2
-//          r = x *(T2+x *(T3+x *(...+x *(T12+x *T13))))
-//     then
-//                              3    2
-//          tan(x+y) = x + (T1*x + (x *(r+y)+y))
-//
-//  4. For x in [0.67434,pi/4],  let y = pi/4 - x, then
-//          tan(x) = ieee_tan(pi/4-y) = (1-ieee_tan(y))/(1+ieee_tan(y))
-//                 = 1 - 2*(ieee_tan(y) - (ieee_tan(y)^2)/(1+ieee_tan(y)))
-//
-// Set returnTan to 1 for tan; -1 for cot.  Anything else is illegal
-// and will cause incorrect results.
-//
-define T00 = 3.33333333333334091986e-01;
-define T01 = 1.33333333333201242699e-01;
-define T02 = 5.39682539762260521377e-02;
-define T03 = 2.18694882948595424599e-02;
-define T04 = 8.86323982359930005737e-03;
-define T05 = 3.59207910759131235356e-03;
-define T06 = 1.45620945432529025516e-03;
-define T07 = 5.88041240820264096874e-04;
-define T08 = 2.46463134818469906812e-04;
-define T09 = 7.81794442939557092300e-05;
-define T10 = 7.14072491382608190305e-05;
-define T11 = -1.85586374855275456654e-05;
-define T12 = 2.59073051863633712884e-05;
-
-function KernelTan(x, y, returnTan) {
-  var z;
-  var w;
-  var hx = %_DoubleHi(x);
-  var ix = hx & 0x7fffffff;
-
-  if (ix < 0x3e300000) {  // |x| < 2^-28
-    if (((ix | %_DoubleLo(x)) | (returnTan + 1)) == 0) {
-      // x == 0 && returnTan = -1
-      return 1 / MathAbs(x);
-    } else {
-      if (returnTan == 1) {
-        return x;
-      } else {
-        // Compute -1/(x + y) carefully
-        var w = x + y;
-        var z = %_ConstructDouble(%_DoubleHi(w), 0);
-        var v = y - (z - x);
-        var a = -1 / w;
-        var t = %_ConstructDouble(%_DoubleHi(a), 0);
-        var s = 1 + t * z;
-        return t + a * (s + t * v);
-      }
-    }
-  }
-  if (ix >= 0x3fe59428) {  // |x| > .6744
-    if (x < 0) {
-      x = -x;
-      y = -y;
-    }
-    z = PIO4 - x;
-    w = PIO4LO - y;
-    x = z + w;
-    y = 0;
-  }
-  z = x * x;
-  w = z * z;
-
-  // Break x^5 * (T1 + x^2*T2 + ...) into
-  // x^5 * (T1 + x^4*T3 + ... + x^20*T11) +
-  // x^5 * (x^2 * (T2 + x^4*T4 + ... + x^22*T12))
-  var r = T01 + w * (T03 + w * (T05 +
-                w * (T07 + w * (T09 + w * T11))));
-  var v = z * (T02 + w * (T04 + w * (T06 +
-                     w * (T08 + w * (T10 + w * T12)))));
-  var s = z * x;
-  r = y + z * (s * (r + v) + y);
-  r = r + T00 * s;
-  w = x + r;
-  if (ix >= 0x3fe59428) {
-    return (1 - ((hx >> 30) & 2)) *
-      (returnTan - 2.0 * (x - (w * w / (w + returnTan) - r)));
-  }
-  if (returnTan == 1) {
-    return w;
-  } else {
-    z = %_ConstructDouble(%_DoubleHi(w), 0);
-    v = r - (z - x);
-    var a = -1 / w;
-    var t = %_ConstructDouble(%_DoubleHi(a), 0);
-    s = 1 + t * z;
-    return t + a * (s + t * v);
-  }
-}
-
-function MathSinSlow(x) {
-  REMPIO2(x);
-  var sign = 1 - (n & 2);
-  if (n & 1) {
-    RETURN_KERNELCOS(y0, y1, * sign);
-  } else {
-    RETURN_KERNELSIN(y0, y1, * sign);
-  }
-}
-
-function MathCosSlow(x) {
-  REMPIO2(x);
-  if (n & 1) {
-    var sign = (n & 2) - 1;
-    RETURN_KERNELSIN(y0, y1, * sign);
-  } else {
-    var sign = 1 - (n & 2);
-    RETURN_KERNELCOS(y0, y1, * sign);
-  }
-}
-
-// ECMA 262 - 15.8.2.16
-function MathSin(x) {
-  x = +x;  // Convert to number.
-  if ((%_DoubleHi(x) & 0x7fffffff) <= 0x3fe921fb) {
-    // |x| < pi/4, approximately.  No reduction needed.
-    RETURN_KERNELSIN(x, 0, /* empty */);
-  }
-  return +MathSinSlow(x);
-}
-
-// ECMA 262 - 15.8.2.7
-function MathCos(x) {
-  x = +x;  // Convert to number.
-  if ((%_DoubleHi(x) & 0x7fffffff) <= 0x3fe921fb) {
-    // |x| < pi/4, approximately.  No reduction needed.
-    RETURN_KERNELCOS(x, 0, /* empty */);
-  }
-  return +MathCosSlow(x);
-}
-
-// ECMA 262 - 15.8.2.18
-function MathTan(x) {
-  x = x * 1;  // Convert to number.
-  if ((%_DoubleHi(x) & 0x7fffffff) <= 0x3fe921fb) {
-    // |x| < pi/4, approximately.  No reduction needed.
-    return KernelTan(x, 0, 1);
-  }
-  REMPIO2(x);
-  return KernelTan(y0, y1, (n & 1) ? -1 : 1);
-}
-
-// ES6 draft 09-27-13, section 20.2.2.20.
-// Math.log1p
-//
-// Method :                  
-//   1. Argument Reduction: find k and f such that 
-//                      1+x = 2^k * (1+f), 
-//         where  sqrt(2)/2 < 1+f < sqrt(2) .
-//
-//      Note. If k=0, then f=x is exact. However, if k!=0, then f
-//      may not be representable exactly. In that case, a correction
-//      term is need. Let u=1+x rounded. Let c = (1+x)-u, then
-//      log(1+x) - log(u) ~ c/u. Thus, we proceed to compute log(u),
-//      and add back the correction term c/u.
-//      (Note: when x > 2**53, one can simply return log(x))
-//
-//   2. Approximation of log1p(f).
-//      Let s = f/(2+f) ; based on log(1+f) = log(1+s) - log(1-s)
-//            = 2s + 2/3 s**3 + 2/5 s**5 + .....,
-//            = 2s + s*R
-//      We use a special Reme algorithm on [0,0.1716] to generate 
-//      a polynomial of degree 14 to approximate R The maximum error 
-//      of this polynomial approximation is bounded by 2**-58.45. In
-//      other words,
-//                      2      4      6      8      10      12      14
-//          R(z) ~ Lp1*s +Lp2*s +Lp3*s +Lp4*s +Lp5*s  +Lp6*s  +Lp7*s
-//      (the values of Lp1 to Lp7 are listed in the program)
-//      and
-//          |      2          14          |     -58.45
-//          | Lp1*s +...+Lp7*s    -  R(z) | <= 2 
-//          |                             |
-//      Note that 2s = f - s*f = f - hfsq + s*hfsq, where hfsq = f*f/2.
-//      In order to guarantee error in log below 1ulp, we compute log
-//      by
-//              log1p(f) = f - (hfsq - s*(hfsq+R)).
-//
-//      3. Finally, log1p(x) = k*ln2 + log1p(f).  
-//                           = k*ln2_hi+(f-(hfsq-(s*(hfsq+R)+k*ln2_lo)))
-//         Here ln2 is split into two floating point number: 
-//                      ln2_hi + ln2_lo,
-//         where n*ln2_hi is always exact for |n| < 2000.
-//
-// Special cases:
-//      log1p(x) is NaN with signal if x < -1 (including -INF) ; 
-//      log1p(+INF) is +INF; log1p(-1) is -INF with signal;
-//      log1p(NaN) is that NaN with no signal.
-//
-// Accuracy:
-//      according to an error analysis, the error is always less than
-//      1 ulp (unit in the last place).
-//
-// Constants:
-//      Constants are found in fdlibm.cc. We assume the C++ compiler to convert
-//      from decimal to binary accurately enough to produce the intended values.
-//
-// Note: Assuming log() return accurate answer, the following
-//       algorithm can be used to compute log1p(x) to within a few ULP:
-//
-//              u = 1+x;
-//              if (u==1.0) return x ; else
-//                          return log(u)*(x/(u-1.0));
-//
-//       See HP-15C Advanced Functions Handbook, p.193.
-//
-define LN2_HI    = 6.93147180369123816490e-01;
-define LN2_LO    = 1.90821492927058770002e-10;
-define TWO_THIRD = 6.666666666666666666e-01;
-define LP1 = 6.666666666666735130e-01;
-define LP2 = 3.999999999940941908e-01;
-define LP3 = 2.857142874366239149e-01;
-define LP4 = 2.222219843214978396e-01;
-define LP5 = 1.818357216161805012e-01;
-define LP6 = 1.531383769920937332e-01;
-define LP7 = 1.479819860511658591e-01;
-
-// 2^54
-define TWO54 = 18014398509481984;
-
-function MathLog1p(x) {
-  x = x * 1;  // Convert to number.
-  var hx = %_DoubleHi(x);
-  var ax = hx & 0x7fffffff;
-  var k = 1;
-  var f = x;
-  var hu = 1;
-  var c = 0;
-  var u = x;
-
-  if (hx < 0x3fda827a) {
-    // x < 0.41422
-    if (ax >= 0x3ff00000) {  // |x| >= 1
-      if (x === -1) {
-        return -INFINITY;  // log1p(-1) = -inf
-      } else {
-        return NaN;  // log1p(x<-1) = NaN
-      }
-    } else if (ax < 0x3c900000)  {
-      // For |x| < 2^-54 we can return x.
-      return x;
-    } else if (ax < 0x3e200000) {
-      // For |x| < 2^-29 we can use a simple two-term Taylor series.
-      return x - x * x * 0.5;
-    }
-
-    if ((hx > 0) || (hx <= -0x402D413D)) {  // (int) 0xbfd2bec3 = -0x402d413d
-      // -.2929 < x < 0.41422
-      k = 0;
-    }
-  }
-
-  // Handle Infinity and NaN
-  if (hx >= 0x7ff00000) return x;
-
-  if (k !== 0) {
-    if (hx < 0x43400000) {
-      // x < 2^53
-      u = 1 + x;
-      hu = %_DoubleHi(u);
-      k = (hu >> 20) - 1023;
-      c = (k > 0) ? 1 - (u - x) : x - (u - 1);
-      c = c / u;
-    } else {
-      hu = %_DoubleHi(u);
-      k = (hu >> 20) - 1023;
-    }
-    hu = hu & 0xfffff;
-    if (hu < 0x6a09e) {
-      u = %_ConstructDouble(hu | 0x3ff00000, %_DoubleLo(u));  // Normalize u.
-    } else {
-      ++k;
-      u = %_ConstructDouble(hu | 0x3fe00000, %_DoubleLo(u));  // Normalize u/2.
-      hu = (0x00100000 - hu) >> 2;
-    }
-    f = u - 1;
-  }
-
-  var hfsq = 0.5 * f * f;
-  if (hu === 0) {
-    // |f| < 2^-20;
-    if (f === 0) {
-      if (k === 0) {
-        return 0.0;
-      } else {
-        return k * LN2_HI + (c + k * LN2_LO);
-      }
-    }
-    var R = hfsq * (1 - TWO_THIRD * f);
-    if (k === 0) {
-      return f - R;
-    } else {
-      return k * LN2_HI - ((R - (k * LN2_LO + c)) - f);
-    }
-  }
-
-  var s = f / (2 + f); 
-  var z = s * s;
-  var R = z * (LP1 + z * (LP2 + z * (LP3 + z * (LP4 +
-          z * (LP5 + z * (LP6 + z * LP7))))));
-  if (k === 0) {
-    return f - (hfsq - s * (hfsq + R));
-  } else {
-    return k * LN2_HI - ((hfsq - (s * (hfsq + R) + (k * LN2_LO + c))) - f);
-  }
-}
-
-// ES6 draft 09-27-13, section 20.2.2.14.
-// Math.expm1
-// Returns exp(x)-1, the exponential of x minus 1.
-//
-// Method
-//   1. Argument reduction:
-//      Given x, find r and integer k such that
-//
-//               x = k*ln2 + r,  |r| <= 0.5*ln2 ~ 0.34658  
-//
-//      Here a correction term c will be computed to compensate 
-//      the error in r when rounded to a floating-point number.
-//
-//   2. Approximating expm1(r) by a special rational function on
-//      the interval [0,0.34658]:
-//      Since
-//          r*(exp(r)+1)/(exp(r)-1) = 2+ r^2/6 - r^4/360 + ...
-//      we define R1(r*r) by
-//          r*(exp(r)+1)/(exp(r)-1) = 2+ r^2/6 * R1(r*r)
-//      That is,
-//          R1(r**2) = 6/r *((exp(r)+1)/(exp(r)-1) - 2/r)
-//                   = 6/r * ( 1 + 2.0*(1/(exp(r)-1) - 1/r))
-//                   = 1 - r^2/60 + r^4/2520 - r^6/100800 + ...
-//      We use a special Remes algorithm on [0,0.347] to generate 
-//      a polynomial of degree 5 in r*r to approximate R1. The 
-//      maximum error of this polynomial approximation is bounded 
-//      by 2**-61. In other words,
-//          R1(z) ~ 1.0 + Q1*z + Q2*z**2 + Q3*z**3 + Q4*z**4 + Q5*z**5
-//      where   Q1  =  -1.6666666666666567384E-2,
-//              Q2  =   3.9682539681370365873E-4,
-//              Q3  =  -9.9206344733435987357E-6,
-//              Q4  =   2.5051361420808517002E-7,
-//              Q5  =  -6.2843505682382617102E-9;
-//      (where z=r*r, and the values of Q1 to Q5 are listed below)
-//      with error bounded by
-//          |                  5           |     -61
-//          | 1.0+Q1*z+...+Q5*z   -  R1(z) | <= 2 
-//          |                              |
-//
-//      expm1(r) = exp(r)-1 is then computed by the following 
-//      specific way which minimize the accumulation rounding error: 
-//                             2     3
-//                            r     r    [ 3 - (R1 + R1*r/2)  ]
-//            expm1(r) = r + --- + --- * [--------------------]
-//                            2     2    [ 6 - r*(3 - R1*r/2) ]
-//
-//      To compensate the error in the argument reduction, we use
-//              expm1(r+c) = expm1(r) + c + expm1(r)*c 
-//                         ~ expm1(r) + c + r*c 
-//      Thus c+r*c will be added in as the correction terms for
-//      expm1(r+c). Now rearrange the term to avoid optimization 
-//      screw up:
-//                      (      2                                    2 )
-//                      ({  ( r    [ R1 -  (3 - R1*r/2) ]  )  }    r  )
-//       expm1(r+c)~r - ({r*(--- * [--------------------]-c)-c} - --- )
-//                      ({  ( 2    [ 6 - r*(3 - R1*r/2) ]  )  }    2  )
-//                      (                                             )
-//
-//                 = r - E
-//   3. Scale back to obtain expm1(x):
-//      From step 1, we have
-//         expm1(x) = either 2^k*[expm1(r)+1] - 1
-//                  = or     2^k*[expm1(r) + (1-2^-k)]
-//   4. Implementation notes:
-//      (A). To save one multiplication, we scale the coefficient Qi
-//           to Qi*2^i, and replace z by (x^2)/2.
-//      (B). To achieve maximum accuracy, we compute expm1(x) by
-//        (i)   if x < -56*ln2, return -1.0, (raise inexact if x!=inf)
-//        (ii)  if k=0, return r-E
-//        (iii) if k=-1, return 0.5*(r-E)-0.5
-//        (iv)  if k=1 if r < -0.25, return 2*((r+0.5)- E)
-//                     else          return  1.0+2.0*(r-E);
-//        (v)   if (k<-2||k>56) return 2^k(1-(E-r)) - 1 (or exp(x)-1)
-//        (vi)  if k <= 20, return 2^k((1-2^-k)-(E-r)), else
-//        (vii) return 2^k(1-((E+2^-k)-r)) 
-//
-// Special cases:
-//      expm1(INF) is INF, expm1(NaN) is NaN;
-//      expm1(-INF) is -1, and
-//      for finite argument, only expm1(0)=0 is exact.
-//
-// Accuracy:
-//      according to an error analysis, the error is always less than
-//      1 ulp (unit in the last place).
-//
-// Misc. info.
-//      For IEEE double 
-//          if x > 7.09782712893383973096e+02 then expm1(x) overflow
-//
-define KEXPM1_OVERFLOW = 7.09782712893383973096e+02;
-define INVLN2          = 1.44269504088896338700;
-define EXPM1_1 = -3.33333333333331316428e-02;
-define EXPM1_2 = 1.58730158725481460165e-03;
-define EXPM1_3 = -7.93650757867487942473e-05;
-define EXPM1_4 = 4.00821782732936239552e-06;
-define EXPM1_5 = -2.01099218183624371326e-07;
-
-function MathExpm1(x) {
-  x = x * 1;  // Convert to number.
-  var y;
-  var hi;
-  var lo;
-  var k;
-  var t;
-  var c;
-    
-  var hx = %_DoubleHi(x);
-  var xsb = hx & 0x80000000;     // Sign bit of x
-  var y = (xsb === 0) ? x : -x;  // y = |x|
-  hx &= 0x7fffffff;              // High word of |x|
-
-  // Filter out huge and non-finite argument
-  if (hx >= 0x4043687a) {     // if |x| ~=> 56 * ln2
-    if (hx >= 0x40862e42) {   // if |x| >= 709.78
-      if (hx >= 0x7ff00000) {
-        // expm1(inf) = inf; expm1(-inf) = -1; expm1(nan) = nan;
-        return (x === -INFINITY) ? -1 : x;
-      }
-      if (x > KEXPM1_OVERFLOW) return INFINITY;  // Overflow
-    }
-    if (xsb != 0) return -1;  // x < -56 * ln2, return -1.
-  }
-
-  // Argument reduction
-  if (hx > 0x3fd62e42) {    // if |x| > 0.5 * ln2
-    if (hx < 0x3ff0a2b2) {  // and |x| < 1.5 * ln2
-      if (xsb === 0) {
-        hi = x - LN2_HI;
-        lo = LN2_LO;
-        k = 1;
-      } else {
-        hi = x + LN2_HI;
-        lo = -LN2_LO;
-        k = -1;
-      }
-    } else {
-      k = (INVLN2 * x + ((xsb === 0) ? 0.5 : -0.5)) | 0;
-      t = k;
-      // t * ln2_hi is exact here.
-      hi = x - t * LN2_HI;
-      lo = t * LN2_LO;
-    }
-    x = hi - lo;
-    c = (hi - x) - lo;
-  } else if (hx < 0x3c900000)	{
-    // When |x| < 2^-54, we can return x.
-    return x;
-  } else {
-    // Fall through.
-    k = 0;
-  }
-
-  // x is now in primary range
-  var hfx = 0.5 * x;
-  var hxs = x * hfx;
-  var r1 = 1 + hxs * (EXPM1_1 + hxs * (EXPM1_2 + hxs *
-                     (EXPM1_3 + hxs * (EXPM1_4 + hxs * EXPM1_5))));
-  t = 3 - r1 * hfx;
-  var e = hxs * ((r1 - t) / (6 - x * t));
-  if (k === 0) {  // c is 0
-    return x - (x*e - hxs);
-  } else {
-    e = (x * (e - c) - c);
-    e -= hxs;
-    if (k === -1) return 0.5 * (x - e) - 0.5;
-    if (k === 1) {
-      if (x < -0.25) return -2 * (e - (x + 0.5));
-      return 1 + 2 * (x - e);
-    }
-
-    if (k <= -2 || k > 56) {
-      // suffice to return exp(x) + 1
-      y = 1 - (e - x);
-      // Add k to y's exponent
-      y = %_ConstructDouble(%_DoubleHi(y) + (k << 20), %_DoubleLo(y));
-      return y - 1;
-    }
-    if (k < 20) {
-      // t = 1 - 2^k
-      t = %_ConstructDouble(0x3ff00000 - (0x200000 >> k), 0);
-      y = t - (e - x);
-      // Add k to y's exponent
-      y = %_ConstructDouble(%_DoubleHi(y) + (k << 20), %_DoubleLo(y));
-    } else {
-      // t = 2^-k
-      t = %_ConstructDouble((0x3ff - k) << 20, 0);
-      y = x - (e + t);
-      y += 1;
-      // Add k to y's exponent
-      y = %_ConstructDouble(%_DoubleHi(y) + (k << 20), %_DoubleLo(y));
-    }
-  }
-  return y;
-}
-
-
 // ES6 draft 09-27-13, section 20.2.2.30.
 // Math.sinh
 // Method :
@@ -792,11 +69,11 @@
     return h * (t + t / (t + 1));
   }
   // |x| in [22, log(maxdouble)], return 0.5 * exp(|x|)
-  if (ax < LOG_MAXD) return h * MathExp(ax);
+  if (ax < LOG_MAXD) return h * %math_exp(ax);
   // |x| in [log(maxdouble), overflowthreshold]
   // overflowthreshold = 710.4758600739426
   if (ax <= KSINH_OVERFLOW) {
-    var w = MathExp(0.5 * ax);
+    var w = %math_exp(0.5 * ax);
     var t = h * w;
     return t * w;
   }
@@ -842,14 +119,14 @@
   }
   // |x| in [0.5*log2, 22], return (exp(|x|)+1/exp(|x|)/2
   if (ix < 0x40360000) {
-    var t = MathExp(MathAbs(x));
+    var t = %math_exp(MathAbs(x));
     return 0.5 * t + 0.5 / t;
   }
   // |x| in [22, log(maxdouble)], return half*exp(|x|)
-  if (ix < 0x40862e42) return 0.5 * MathExp(MathAbs(x));
+  if (ix < 0x40862e42) return 0.5 * %math_exp(MathAbs(x));
   // |x| in [log(maxdouble), overflowthreshold]
   if (MathAbs(x) <= KCOSH_OVERFLOW) {
-    var w = MathExp(0.5 * MathAbs(x));
+    var w = %math_exp(0.5 * MathAbs(x));
     var t = 0.5 * w;
     return t * w;
   }
@@ -915,203 +192,12 @@
   return (x >= 0) ? z : -z;
 }
 
-// ES6 draft 09-27-13, section 20.2.2.21.
-// Return the base 10 logarithm of x
-//
-// Method :
-//      Let log10_2hi = leading 40 bits of log10(2) and
-//          log10_2lo = log10(2) - log10_2hi,
-//          ivln10   = 1/log(10) rounded.
-//      Then
-//              n = ilogb(x),
-//              if(n<0)  n = n+1;
-//              x = scalbn(x,-n);
-//              log10(x) := n*log10_2hi + (n*log10_2lo + ivln10*log(x))
-//
-// Note 1:
-//      To guarantee log10(10**n)=n, where 10**n is normal, the rounding
-//      mode must set to Round-to-Nearest.
-// Note 2:
-//      [1/log(10)] rounded to 53 bits has error .198 ulps;
-//      log10 is monotonic at all binary break points.
-//
-// Special cases:
-//      log10(x) is NaN if x < 0;
-//      log10(+INF) is +INF; log10(0) is -INF;
-//      log10(NaN) is that NaN;
-//      log10(10**N) = N  for N=0,1,...,22.
-//
-
-define IVLN10 = 4.34294481903251816668e-01;
-define LOG10_2HI = 3.01029995663611771306e-01;
-define LOG10_2LO = 3.69423907715893078616e-13;
-
-function MathLog10(x) {
-  x = x * 1;  // Convert to number.
-  var hx = %_DoubleHi(x);
-  var lx = %_DoubleLo(x);
-  var k = 0;
-
-  if (hx < 0x00100000) {
-    // x < 2^-1022
-    // log10(+/- 0) = -Infinity.
-    if (((hx & 0x7fffffff) | lx) === 0) return -INFINITY;
-    // log10 of negative number is NaN.
-    if (hx < 0) return NaN;
-    // Subnormal number. Scale up x.
-    k -= 54;
-    x *= TWO54;
-    hx = %_DoubleHi(x);
-    lx = %_DoubleLo(x);
-  }
-
-  // Infinity or NaN.
-  if (hx >= 0x7ff00000) return x;
-
-  k += (hx >> 20) - 1023;
-  var i = (k & 0x80000000) >>> 31;
-  hx = (hx & 0x000fffff) | ((0x3ff - i) << 20);
-  var y = k + i;
-  x = %_ConstructDouble(hx, lx);
-
-  var z = y * LOG10_2LO + IVLN10 * %_MathLogRT(x);
-  return z + y * LOG10_2HI;
-}
-
-
-// ES6 draft 09-27-13, section 20.2.2.22.
-// Return the base 2 logarithm of x
-//
-// fdlibm does not have an explicit log2 function, but fdlibm's pow
-// function does implement an accurate log2 function as part of the
-// pow implementation.  This extracts the core parts of that as a
-// separate log2 function.
-
-// Method:
-// Compute log2(x) in two pieces:
-// log2(x) = w1 + w2
-// where w1 has 53-24 = 29 bits of trailing zeroes.
-
-define DP_H = 5.84962487220764160156e-01;
-define DP_L = 1.35003920212974897128e-08;
-
-// Polynomial coefficients for (3/2)*(log2(x) - 2*s - 2/3*s^3)
-define LOG2_1 = 5.99999999999994648725e-01;
-define LOG2_2 = 4.28571428578550184252e-01;
-define LOG2_3 = 3.33333329818377432918e-01;
-define LOG2_4 = 2.72728123808534006489e-01;
-define LOG2_5 = 2.30660745775561754067e-01;
-define LOG2_6 = 2.06975017800338417784e-01;
-
-// cp = 2/(3*ln(2)). Note that cp_h + cp_l is cp, but with more accuracy.
-define CP = 9.61796693925975554329e-01;
-define CP_H = 9.61796700954437255859e-01;
-define CP_L = -7.02846165095275826516e-09;
-// 2^53
-define TWO53 = 9007199254740992; 
-
-function MathLog2(x) {
-  x = x * 1;  // Convert to number.
-  var ax = MathAbs(x);
-  var hx = %_DoubleHi(x);
-  var lx = %_DoubleLo(x);
-  var ix = hx & 0x7fffffff;
-
-  // Handle special cases.
-  // log2(+/- 0) = -Infinity
-  if ((ix | lx) == 0) return -INFINITY;
-
-  // log(x) = NaN, if x < 0
-  if (hx < 0) return NaN;
-
-  // log2(Infinity) = Infinity, log2(NaN) = NaN
-  if (ix >= 0x7ff00000) return x;
-
-  var n = 0;
-
-  // Take care of subnormal number.
-  if (ix < 0x00100000) {
-    ax *= TWO53;
-    n -= 53;
-    ix = %_DoubleHi(ax);
-  }
-
-  n += (ix >> 20) - 0x3ff;
-  var j = ix & 0x000fffff;
-
-  // Determine interval.
-  ix = j | 0x3ff00000;  // normalize ix.
-
-  var bp = 1;
-  var dp_h = 0;
-  var dp_l = 0;
-  if (j > 0x3988e) {  // |x| > sqrt(3/2)
-    if (j < 0xbb67a) {  // |x| < sqrt(3)
-      bp = 1.5;
-      dp_h = DP_H;
-      dp_l = DP_L;
-    } else {
-      n += 1;
-      ix -= 0x00100000;
-    }
-  }
- 
-  ax = %_ConstructDouble(ix, %_DoubleLo(ax));
-
-  // Compute ss = s_h + s_l = (x - 1)/(x+1) or (x - 1.5)/(x + 1.5)
-  var u = ax - bp;
-  var v = 1 / (ax + bp);
-  var ss = u * v;
-  var s_h = %_ConstructDouble(%_DoubleHi(ss), 0);
-
-  // t_h = ax + bp[k] High
-  var t_h = %_ConstructDouble(%_DoubleHi(ax + bp), 0)
-  var t_l = ax - (t_h - bp);
-  var s_l = v * ((u - s_h * t_h) - s_h * t_l);
-
-  // Compute log2(ax)
-  var s2 = ss * ss;
-  var r = s2 * s2 * (LOG2_1 + s2 * (LOG2_2 + s2 * (LOG2_3 + s2 * (
-                     LOG2_4 + s2 * (LOG2_5 + s2 * LOG2_6)))));
-  r += s_l * (s_h + ss);
-  s2  = s_h * s_h;
-  t_h = %_ConstructDouble(%_DoubleHi(3.0 + s2 + r), 0);
-  t_l = r - ((t_h - 3.0) - s2);
-  // u + v = ss * (1 + ...)
-  u = s_h * t_h;
-  v = s_l * t_h + t_l * ss;
-
-  // 2 / (3 * log(2)) * (ss + ...)
-  var p_h = %_ConstructDouble(%_DoubleHi(u + v), 0);
-  var p_l = v - (p_h - u);
-  var z_h = CP_H * p_h;
-  var z_l = CP_L * p_h + p_l * CP + dp_l;
-
-  // log2(ax) = (ss + ...) * 2 / (3 * log(2)) = n + dp_h + z_h + z_l
-  var t = n;
-  var t1 = %_ConstructDouble(%_DoubleHi(((z_h + z_l) + dp_h) + t), 0);
-  var t2 = z_l - (((t1 - t) - dp_h) - z_h);
-
-  // t1 + t2 = log2(ax), sum up because we do not care about extra precision.
-  return t1 + t2;
-}
-
 //-------------------------------------------------------------------
 
 utils.InstallFunctions(GlobalMath, DONT_ENUM, [
-  "cos", MathCos,
-  "sin", MathSin,
-  "tan", MathTan,
   "sinh", MathSinh,
   "cosh", MathCosh,
-  "tanh", MathTanh,
-  "log10", MathLog10,
-  "log2", MathLog2,
-  "log1p", MathLog1p,
-  "expm1", MathExpm1
+  "tanh", MathTanh
 ]);
 
-%SetForceInlineFlag(MathSin);
-%SetForceInlineFlag(MathCos);
-
 })
diff --git a/src/type-cache.h b/src/type-cache.h
index 2a95df9..2c13b39 100644
--- a/src/type-cache.h
+++ b/src/type-cache.h
@@ -48,19 +48,30 @@
   Type* const kZeroish =
       Type::Union(kSingletonZero, Type::MinusZeroOrNaN(), zone());
   Type* const kInteger = CreateRange(-V8_INFINITY, V8_INFINITY);
-  Type* const kPositiveInteger = CreateRange(0.0, V8_INFINITY);
   Type* const kIntegerOrMinusZero =
       Type::Union(kInteger, Type::MinusZero(), zone());
   Type* const kIntegerOrMinusZeroOrNaN =
       Type::Union(kIntegerOrMinusZero, Type::NaN(), zone());
+  Type* const kPositiveInteger = CreateRange(0.0, V8_INFINITY);
+  Type* const kPositiveIntegerOrMinusZero =
+      Type::Union(kPositiveInteger, Type::MinusZero(), zone());
+  Type* const kPositiveIntegerOrMinusZeroOrNaN =
+      Type::Union(kPositiveIntegerOrMinusZero, Type::NaN(), zone());
 
   Type* const kAdditiveSafeInteger =
       CreateRange(-4503599627370496.0, 4503599627370496.0);
   Type* const kSafeInteger = CreateRange(-kMaxSafeInteger, kMaxSafeInteger);
+  Type* const kAdditiveSafeIntegerOrMinusZero =
+      Type::Union(kAdditiveSafeInteger, Type::MinusZero(), zone());
+  Type* const kSafeIntegerOrMinusZero =
+      Type::Union(kSafeInteger, Type::MinusZero(), zone());
   Type* const kPositiveSafeInteger = CreateRange(0.0, kMaxSafeInteger);
+  Type* const kSafeSigned32 = CreateRange(-kMaxInt, kMaxInt);
 
   Type* const kUntaggedUndefined =
       Type::Intersect(Type::Undefined(), Type::Untagged(), zone());
+  Type* const kSigned32OrMinusZero =
+      Type::Union(Type::Signed32(), Type::MinusZero(), zone());
 
   // Asm.js related types.
   Type* const kAsmSigned = kInt32;
diff --git a/src/type-feedback-vector-inl.h b/src/type-feedback-vector-inl.h
index 015104e..771021f 100644
--- a/src/type-feedback-vector-inl.h
+++ b/src/type-feedback-vector-inl.h
@@ -14,13 +14,11 @@
 template <typename Derived>
 FeedbackVectorSlot FeedbackVectorSpecBase<Derived>::AddSlot(
     FeedbackVectorSlotKind kind) {
-  Derived* derived = static_cast<Derived*>(this);
-
-  int slot = derived->slots();
+  int slot = This()->slots();
   int entries_per_slot = TypeFeedbackMetadata::GetSlotSize(kind);
-  derived->append(kind);
+  This()->append(kind);
   for (int i = 1; i < entries_per_slot; i++) {
-    derived->append(FeedbackVectorSlotKind::INVALID);
+    This()->append(FeedbackVectorSlotKind::INVALID);
   }
   return FeedbackVectorSlot(slot);
 }
@@ -32,6 +30,10 @@
   return reinterpret_cast<TypeFeedbackMetadata*>(obj);
 }
 
+bool TypeFeedbackMetadata::is_empty() const {
+  if (length() == 0) return true;
+  return false;
+}
 
 int TypeFeedbackMetadata::slot_count() const {
   if (length() == 0) return 0;
@@ -53,6 +55,26 @@
   return kind == FeedbackVectorSlotKind::GENERAL ? 1 : 2;
 }
 
+bool TypeFeedbackMetadata::SlotRequiresName(FeedbackVectorSlotKind kind) {
+  switch (kind) {
+    case FeedbackVectorSlotKind::LOAD_GLOBAL_IC:
+      return true;
+
+    case FeedbackVectorSlotKind::CALL_IC:
+    case FeedbackVectorSlotKind::LOAD_IC:
+    case FeedbackVectorSlotKind::KEYED_LOAD_IC:
+    case FeedbackVectorSlotKind::STORE_IC:
+    case FeedbackVectorSlotKind::KEYED_STORE_IC:
+    case FeedbackVectorSlotKind::GENERAL:
+    case FeedbackVectorSlotKind::INVALID:
+      return false;
+
+    case FeedbackVectorSlotKind::KINDS_NUMBER:
+      break;
+  }
+  UNREACHABLE();
+  return false;
+}
 
 bool TypeFeedbackVector::is_empty() const {
   if (length() == 0) return true;
@@ -73,24 +95,10 @@
                     : TypeFeedbackMetadata::cast(get(kMetadataIndex));
 }
 
-
-FeedbackVectorSlotKind TypeFeedbackVector::GetKind(
-    FeedbackVectorSlot slot) const {
-  DCHECK(!is_empty());
-  return metadata()->GetKind(slot);
-}
-
-
-int TypeFeedbackVector::GetIndex(FeedbackVectorSlot slot) const {
-  DCHECK(slot.ToInt() < slot_count());
-  return kReservedIndexCount + slot.ToInt();
-}
-
-
-// Conversion from an integer index to either a slot or an ic slot. The caller
-// should know what kind she expects.
-FeedbackVectorSlot TypeFeedbackVector::ToSlot(int index) const {
-  DCHECK(index >= kReservedIndexCount && index < length());
+// Conversion from an integer index to either a slot or an ic slot.
+// static
+FeedbackVectorSlot TypeFeedbackVector::ToSlot(int index) {
+  DCHECK(index >= kReservedIndexCount);
   return FeedbackVectorSlot(index - kReservedIndexCount);
 }
 
@@ -149,6 +157,21 @@
   return isolate->heap()->uninitialized_symbol();
 }
 
+bool TypeFeedbackMetadataIterator::HasNext() const {
+  return next_slot_.ToInt() < metadata()->slot_count();
+}
+
+FeedbackVectorSlot TypeFeedbackMetadataIterator::Next() {
+  DCHECK(HasNext());
+  cur_slot_ = next_slot_;
+  slot_kind_ = metadata()->GetKind(cur_slot_);
+  next_slot_ = FeedbackVectorSlot(next_slot_.ToInt() + entry_size());
+  return cur_slot_;
+}
+
+int TypeFeedbackMetadataIterator::entry_size() const {
+  return TypeFeedbackMetadata::GetSlotSize(kind());
+}
 
 Object* FeedbackNexus::GetFeedback() const { return vector()->Get(slot()); }
 
diff --git a/src/type-feedback-vector.cc b/src/type-feedback-vector.cc
index 4519bd6..bc2f1c2 100644
--- a/src/type-feedback-vector.cc
+++ b/src/type-feedback-vector.cc
@@ -37,6 +37,23 @@
   return VectorICComputer::decode(data, slot.ToInt());
 }
 
+String* TypeFeedbackMetadata::GetName(FeedbackVectorSlot slot) const {
+  DCHECK(SlotRequiresName(GetKind(slot)));
+  FixedArray* names = FixedArray::cast(get(kNamesTableIndex));
+  // TODO(ishell): consider using binary search here or even Dictionary when we
+  // have more ICs with names.
+  Smi* key = Smi::FromInt(slot.ToInt());
+  for (int entry = 0; entry < names->length(); entry += kNameTableEntrySize) {
+    Object* current_key = names->get(entry + kNameTableSlotIndex);
+    if (current_key == key) {
+      Object* name = names->get(entry + kNameTableNameIndex);
+      DCHECK(name->IsString());
+      return String::cast(name);
+    }
+  }
+  UNREACHABLE();
+  return nullptr;
+}
 
 void TypeFeedbackMetadata::SetKind(FeedbackVectorSlot slot,
                                    FeedbackVectorSlotKind kind) {
@@ -57,12 +74,13 @@
 template <typename Spec>
 Handle<TypeFeedbackMetadata> TypeFeedbackMetadata::New(Isolate* isolate,
                                                        const Spec* spec) {
+  Factory* factory = isolate->factory();
+
   const int slot_count = spec->slots();
   const int slot_kinds_length = VectorICComputer::word_count(slot_count);
   const int length = slot_kinds_length + kReservedIndexCount;
   if (length == kReservedIndexCount) {
-    return Handle<TypeFeedbackMetadata>::cast(
-        isolate->factory()->empty_fixed_array());
+    return Handle<TypeFeedbackMetadata>::cast(factory->empty_fixed_array());
   }
 #ifdef DEBUG
   for (int i = 0; i < slot_count;) {
@@ -76,7 +94,7 @@
   }
 #endif
 
-  Handle<FixedArray> array = isolate->factory()->NewFixedArray(length, TENURED);
+  Handle<FixedArray> array = factory->NewFixedArray(length, TENURED);
   array->set(kSlotsCountIndex, Smi::FromInt(slot_count));
   // Fill the bit-vector part with zeros.
   for (int i = 0; i < slot_kinds_length; i++) {
@@ -85,9 +103,37 @@
 
   Handle<TypeFeedbackMetadata> metadata =
       Handle<TypeFeedbackMetadata>::cast(array);
+
+  // Add names to NamesTable.
+  const int name_count = spec->name_count();
+
+  Handle<FixedArray> names =
+      name_count == 0
+          ? factory->empty_fixed_array()
+          : factory->NewFixedArray(name_count * kNameTableEntrySize);
+  int name_index = 0;
   for (int i = 0; i < slot_count; i++) {
-    metadata->SetKind(FeedbackVectorSlot(i), spec->GetKind(i));
+    FeedbackVectorSlotKind kind = spec->GetKind(i);
+    metadata->SetKind(FeedbackVectorSlot(i), kind);
+    if (SlotRequiresName(kind)) {
+      Handle<String> name = spec->GetName(name_index);
+      DCHECK(!name.is_null());
+      int entry = name_index * kNameTableEntrySize;
+      names->set(entry + kNameTableSlotIndex, Smi::FromInt(i));
+      names->set(entry + kNameTableNameIndex, *name);
+      name_index++;
+    }
   }
+  DCHECK_EQ(name_count, name_index);
+  metadata->set(kNamesTableIndex, *names);
+
+  // It's important that the TypeFeedbackMetadata have a COW map, since it's
+  // pointed to by both a SharedFunctionInfo and indirectly by closures through
+  // the TypeFeedbackVector. The serializer uses the COW map type to decide
+  // this object belongs in the startup snapshot and not the partial
+  // snapshot(s).
+  metadata->set_map(isolate->heap()->fixed_cow_array_map());
+
   return metadata;
 }
 
@@ -99,14 +145,51 @@
   }
 
   int slots = slot_count();
-  for (int i = 0; i < slots; i++) {
-    if (GetKind(FeedbackVectorSlot(i)) != other_spec->GetKind(i)) {
+  int name_index = 0;
+  for (int i = 0; i < slots;) {
+    FeedbackVectorSlot slot(i);
+    FeedbackVectorSlotKind kind = GetKind(slot);
+    int entry_size = TypeFeedbackMetadata::GetSlotSize(kind);
+
+    if (kind != other_spec->GetKind(i)) {
       return true;
     }
+    if (SlotRequiresName(kind)) {
+      String* name = GetName(slot);
+      DCHECK(name != GetHeap()->empty_string());
+      String* other_name = *other_spec->GetName(name_index++);
+      if (name != other_name) {
+        return true;
+      }
+    }
+    i += entry_size;
   }
   return false;
 }
 
+bool TypeFeedbackMetadata::DiffersFrom(
+    const TypeFeedbackMetadata* other_metadata) const {
+  if (other_metadata->slot_count() != slot_count()) {
+    return true;
+  }
+
+  int slots = slot_count();
+  for (int i = 0; i < slots;) {
+    FeedbackVectorSlot slot(i);
+    FeedbackVectorSlotKind kind = GetKind(slot);
+    int entry_size = TypeFeedbackMetadata::GetSlotSize(kind);
+    if (GetKind(slot) != other_metadata->GetKind(slot)) {
+      return true;
+    }
+    if (SlotRequiresName(kind)) {
+      if (GetName(slot) != other_metadata->GetName(slot)) {
+        return true;
+      }
+    }
+    i += entry_size;
+  }
+  return false;
+}
 
 const char* TypeFeedbackMetadata::Kind2String(FeedbackVectorSlotKind kind) {
   switch (kind) {
@@ -116,6 +199,8 @@
       return "CALL_IC";
     case FeedbackVectorSlotKind::LOAD_IC:
       return "LOAD_IC";
+    case FeedbackVectorSlotKind::LOAD_GLOBAL_IC:
+      return "LOAD_GLOBAL_IC";
     case FeedbackVectorSlotKind::KEYED_LOAD_IC:
       return "KEYED_LOAD_IC";
     case FeedbackVectorSlotKind::STORE_IC:
@@ -131,6 +216,16 @@
   return "?";
 }
 
+FeedbackVectorSlotKind TypeFeedbackVector::GetKind(
+    FeedbackVectorSlot slot) const {
+  DCHECK(!is_empty());
+  return metadata()->GetKind(slot);
+}
+
+String* TypeFeedbackVector::GetName(FeedbackVectorSlot slot) const {
+  DCHECK(!is_empty());
+  return metadata()->GetName(slot);
+}
 
 // static
 Handle<TypeFeedbackVector> TypeFeedbackVector::New(
@@ -146,13 +241,29 @@
   Handle<FixedArray> array = factory->NewFixedArray(length, TENURED);
   array->set(kMetadataIndex, *metadata);
 
+  DisallowHeapAllocation no_gc;
+
   // Ensure we can skip the write barrier
   Handle<Object> uninitialized_sentinel = UninitializedSentinel(isolate);
   DCHECK_EQ(*factory->uninitialized_symbol(), *uninitialized_sentinel);
-  for (int i = kReservedIndexCount; i < length; i++) {
-    array->set(i, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
-  }
+  for (int i = 0; i < slot_count;) {
+    FeedbackVectorSlot slot(i);
+    FeedbackVectorSlotKind kind = metadata->GetKind(slot);
+    int index = TypeFeedbackVector::GetIndex(slot);
+    int entry_size = TypeFeedbackMetadata::GetSlotSize(kind);
 
+    Object* value;
+    if (kind == FeedbackVectorSlotKind::LOAD_GLOBAL_IC) {
+      value = *factory->empty_weak_cell();
+    } else {
+      value = *uninitialized_sentinel;
+    }
+    array->set(index, value, SKIP_WRITE_BARRIER);
+    for (int j = 1; j < entry_size; j++) {
+      array->set(index + j, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
+    }
+    i += entry_size;
+  }
   return Handle<TypeFeedbackVector>::cast(array);
 }
 
@@ -208,6 +319,11 @@
           nexus.Clear(shared->code());
           break;
         }
+        case FeedbackVectorSlotKind::LOAD_GLOBAL_IC: {
+          LoadGlobalICNexus nexus(this, slot);
+          nexus.Clear(shared->code());
+          break;
+        }
         case FeedbackVectorSlotKind::KEYED_LOAD_IC: {
           KeyedLoadICNexus nexus(this, slot);
           nexus.Clear(shared->code());
@@ -251,8 +367,28 @@
   SharedFunctionInfo::Iterator iterator(isolate);
   SharedFunctionInfo* shared;
   while ((shared = iterator.Next())) {
-    TypeFeedbackVector* vector = shared->feedback_vector();
-    vector->ClearKeyedStoreICs(shared);
+    if (!shared->OptimizedCodeMapIsCleared()) {
+      FixedArray* optimized_code_map = shared->optimized_code_map();
+      int length = optimized_code_map->length();
+      for (int i = SharedFunctionInfo::kEntriesStart; i < length;
+           i += SharedFunctionInfo::kEntryLength) {
+        Object* lits =
+            optimized_code_map->get(i + SharedFunctionInfo::kLiteralsOffset);
+        TypeFeedbackVector* vector = nullptr;
+        if (lits->IsWeakCell()) {
+          WeakCell* cell = WeakCell::cast(lits);
+          if (cell->value()->IsLiteralsArray()) {
+            vector = LiteralsArray::cast(cell->value())->feedback_vector();
+          }
+        } else {
+          DCHECK(lits->IsLiteralsArray());
+          vector = LiteralsArray::cast(lits)->feedback_vector();
+        }
+        if (vector != nullptr) {
+          vector->ClearKeyedStoreICs(shared);
+        }
+      }
+    }
   }
 }
 
@@ -389,6 +525,17 @@
   return UNINITIALIZED;
 }
 
+InlineCacheState LoadGlobalICNexus::StateFromFeedback() const {
+  Isolate* isolate = GetIsolate();
+  Object* feedback = GetFeedback();
+
+  Object* extra = GetFeedbackExtra();
+  if (!WeakCell::cast(feedback)->cleared() ||
+      extra != *TypeFeedbackVector::UninitializedSentinel(isolate)) {
+    return MONOMORPHIC;
+  }
+  return UNINITIALIZED;
+}
 
 InlineCacheState KeyedLoadICNexus::StateFromFeedback() const {
   Isolate* isolate = GetIsolate();
@@ -488,7 +635,7 @@
 int CallICNexus::ExtractCallCount() {
   Object* call_count = GetFeedbackExtra();
   if (call_count->IsSmi()) {
-    int value = Smi::cast(call_count)->value() / 2;
+    int value = Smi::cast(call_count)->value();
     return value;
   }
   return -1;
@@ -505,14 +652,14 @@
         GetIsolate()->factory()->NewAllocationSite();
     SetFeedback(*new_site);
   }
-  SetFeedbackExtra(Smi::FromInt(kCallCountIncrement), SKIP_WRITE_BARRIER);
+  SetFeedbackExtra(Smi::FromInt(1), SKIP_WRITE_BARRIER);
 }
 
 
 void CallICNexus::ConfigureMonomorphic(Handle<JSFunction> function) {
   Handle<WeakCell> new_cell = GetIsolate()->factory()->NewWeakCell(function);
   SetFeedback(*new_cell);
-  SetFeedbackExtra(Smi::FromInt(kCallCountIncrement), SKIP_WRITE_BARRIER);
+  SetFeedbackExtra(Smi::FromInt(1), SKIP_WRITE_BARRIER);
 }
 
 
@@ -524,8 +671,7 @@
 void CallICNexus::ConfigureMegamorphic(int call_count) {
   SetFeedback(*TypeFeedbackVector::MegamorphicSentinel(GetIsolate()),
               SKIP_WRITE_BARRIER);
-  SetFeedbackExtra(Smi::FromInt(call_count * kCallCountIncrement),
-                   SKIP_WRITE_BARRIER);
+  SetFeedbackExtra(Smi::FromInt(call_count), SKIP_WRITE_BARRIER);
 }
 
 
@@ -536,6 +682,24 @@
   SetFeedbackExtra(*handler);
 }
 
+void LoadGlobalICNexus::ConfigureUninitialized() {
+  Isolate* isolate = GetIsolate();
+  SetFeedback(isolate->heap()->empty_weak_cell(), SKIP_WRITE_BARRIER);
+  SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(isolate),
+                   SKIP_WRITE_BARRIER);
+}
+
+void LoadGlobalICNexus::ConfigurePropertyCellMode(Handle<PropertyCell> cell) {
+  Isolate* isolate = GetIsolate();
+  SetFeedback(*isolate->factory()->NewWeakCell(cell));
+  SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(isolate),
+                   SKIP_WRITE_BARRIER);
+}
+
+void LoadGlobalICNexus::ConfigureHandlerMode(Handle<Code> handler) {
+  SetFeedback(GetIsolate()->heap()->empty_weak_cell());
+  SetFeedbackExtra(*handler);
+}
 
 void KeyedLoadICNexus::ConfigureMonomorphic(Handle<Name> name,
                                             Handle<Map> receiver_map,
@@ -587,7 +751,6 @@
                    SKIP_WRITE_BARRIER);
 }
 
-
 void KeyedLoadICNexus::ConfigurePolymorphic(Handle<Name> name,
                                             MapHandleList* maps,
                                             CodeHandleList* handlers) {
@@ -781,6 +944,9 @@
 
 void LoadICNexus::Clear(Code* host) { LoadIC::Clear(GetIsolate(), host, this); }
 
+void LoadGlobalICNexus::Clear(Code* host) {
+  LoadGlobalIC::Clear(GetIsolate(), host, this);
+}
 
 void KeyedLoadICNexus::Clear(Code* host) {
   KeyedLoadIC::Clear(GetIsolate(), host, this);
diff --git a/src/type-feedback-vector.h b/src/type-feedback-vector.h
index 770b5e5..38d5695 100644
--- a/src/type-feedback-vector.h
+++ b/src/type-feedback-vector.h
@@ -15,7 +15,6 @@
 namespace v8 {
 namespace internal {
 
-
 enum class FeedbackVectorSlotKind {
   // This kind means that the slot points to the middle of other slot
   // which occupies more than one feedback vector element.
@@ -24,6 +23,7 @@
 
   CALL_IC,
   LOAD_IC,
+  LOAD_GLOBAL_IC,
   KEYED_LOAD_IC,
   STORE_IC,
   KEYED_STORE_IC,
@@ -34,7 +34,6 @@
   KINDS_NUMBER  // Last value indicating number of kinds.
 };
 
-
 std::ostream& operator<<(std::ostream& os, FeedbackVectorSlotKind kind);
 
 
@@ -51,6 +50,11 @@
     return AddSlot(FeedbackVectorSlotKind::LOAD_IC);
   }
 
+  FeedbackVectorSlot AddLoadGlobalICSlot(Handle<String> name) {
+    This()->append_name(name);
+    return AddSlot(FeedbackVectorSlotKind::LOAD_GLOBAL_IC);
+  }
+
   FeedbackVectorSlot AddKeyedLoadICSlot() {
     return AddSlot(FeedbackVectorSlotKind::KEYED_LOAD_IC);
   }
@@ -66,40 +70,65 @@
   FeedbackVectorSlot AddGeneralSlot() {
     return AddSlot(FeedbackVectorSlotKind::GENERAL);
   }
+
+#ifdef OBJECT_PRINT
+  // For gdb debugging.
+  void Print();
+#endif  // OBJECT_PRINT
+
+  DECLARE_PRINTER(FeedbackVectorSpec)
+
+ private:
+  Derived* This() { return static_cast<Derived*>(this); }
 };
 
 
 class StaticFeedbackVectorSpec
     : public FeedbackVectorSpecBase<StaticFeedbackVectorSpec> {
  public:
-  StaticFeedbackVectorSpec() : slots_(0) {}
+  StaticFeedbackVectorSpec() : slot_count_(0), name_count_(0) {}
 
-  int slots() const { return slots_; }
+  int slots() const { return slot_count_; }
 
   FeedbackVectorSlotKind GetKind(int slot) const {
-    DCHECK(slot >= 0 && slot < slots_);
+    DCHECK(slot >= 0 && slot < slot_count_);
     return kinds_[slot];
   }
 
+  int name_count() const { return name_count_; }
+
+  Handle<String> GetName(int index) const {
+    DCHECK(index >= 0 && index < name_count_);
+    return names_[index];
+  }
+
  private:
   friend class FeedbackVectorSpecBase<StaticFeedbackVectorSpec>;
 
   void append(FeedbackVectorSlotKind kind) {
-    DCHECK(slots_ < kMaxLength);
-    kinds_[slots_++] = kind;
+    DCHECK(slot_count_ < kMaxLength);
+    kinds_[slot_count_++] = kind;
+  }
+
+  void append_name(Handle<String> name) {
+    DCHECK(name_count_ < kMaxLength);
+    names_[name_count_++] = name;
   }
 
   static const int kMaxLength = 12;
 
-  int slots_;
+  int slot_count_;
   FeedbackVectorSlotKind kinds_[kMaxLength];
+  int name_count_;
+  Handle<String> names_[kMaxLength];
 };
 
 
 class FeedbackVectorSpec : public FeedbackVectorSpecBase<FeedbackVectorSpec> {
  public:
-  explicit FeedbackVectorSpec(Zone* zone) : slot_kinds_(zone) {
+  explicit FeedbackVectorSpec(Zone* zone) : slot_kinds_(zone), names_(zone) {
     slot_kinds_.reserve(16);
+    names_.reserve(8);
   }
 
   int slots() const { return static_cast<int>(slot_kinds_.size()); }
@@ -108,6 +137,10 @@
     return static_cast<FeedbackVectorSlotKind>(slot_kinds_.at(slot));
   }
 
+  int name_count() const { return static_cast<int>(names_.size()); }
+
+  Handle<String> GetName(int index) const { return names_.at(index); }
+
  private:
   friend class FeedbackVectorSpecBase<FeedbackVectorSpec>;
 
@@ -115,13 +148,17 @@
     slot_kinds_.push_back(static_cast<unsigned char>(kind));
   }
 
+  void append_name(Handle<String> name) { names_.push_back(name); }
+
   ZoneVector<unsigned char> slot_kinds_;
+  ZoneVector<Handle<String>> names_;
 };
 
 
 // The shape of the TypeFeedbackMetadata is an array with:
 // 0: slot_count
-// 1..N: slot kinds packed into a bit vector
+// 1: names table
+// 2..N: slot kinds packed into a bit vector
 //
 class TypeFeedbackMetadata : public FixedArray {
  public:
@@ -129,19 +166,34 @@
   static inline TypeFeedbackMetadata* cast(Object* obj);
 
   static const int kSlotsCountIndex = 0;
-  static const int kReservedIndexCount = 1;
+  static const int kNamesTableIndex = 1;
+  static const int kReservedIndexCount = 2;
+
+  static const int kNameTableEntrySize = 2;
+  static const int kNameTableSlotIndex = 0;
+  static const int kNameTableNameIndex = 1;
 
   // Returns number of feedback vector elements used by given slot kind.
   static inline int GetSlotSize(FeedbackVectorSlotKind kind);
 
+  // Defines if slots of given kind require "name".
+  static inline bool SlotRequiresName(FeedbackVectorSlotKind kind);
+
   bool SpecDiffersFrom(const FeedbackVectorSpec* other_spec) const;
 
+  bool DiffersFrom(const TypeFeedbackMetadata* other_metadata) const;
+
+  inline bool is_empty() const;
+
   // Returns number of slots in the vector.
   inline int slot_count() const;
 
   // Returns slot kind for given slot.
   FeedbackVectorSlotKind GetKind(FeedbackVectorSlot slot) const;
 
+  // Returns name for given slot.
+  String* GetName(FeedbackVectorSlot slot) const;
+
   template <typename Spec>
   static Handle<TypeFeedbackMetadata> New(Isolate* isolate, const Spec* spec);
 
@@ -155,7 +207,7 @@
   static const char* Kind2String(FeedbackVectorSlotKind kind);
 
  private:
-  static const int kFeedbackVectorSlotKindBits = 3;
+  static const int kFeedbackVectorSlotKindBits = 4;
   STATIC_ASSERT(static_cast<int>(FeedbackVectorSlotKind::KINDS_NUMBER) <
                 (1 << kFeedbackVectorSlotKindBits));
 
@@ -172,9 +224,9 @@
 // 0: feedback metadata
 // 1: ics_with_types
 // 2: ics_with_generic_info
-// 3: feedback slot #0 (N >= 3)
+// 3: feedback slot #0
 // ...
-// N + slot_count - 1: feedback slot #(slot_count-1)
+// 3 + slot_count - 1: feedback slot #(slot_count-1)
 //
 class TypeFeedbackVector : public FixedArray {
  public:
@@ -194,18 +246,22 @@
   inline TypeFeedbackMetadata* metadata() const;
 
   // Conversion from a slot to an integer index to the underlying array.
-  inline int GetIndex(FeedbackVectorSlot slot) const;
+  static int GetIndex(FeedbackVectorSlot slot) {
+    return kReservedIndexCount + slot.ToInt();
+  }
   static int GetIndexFromSpec(const FeedbackVectorSpec* spec,
                               FeedbackVectorSlot slot);
 
   // Conversion from an integer index to the underlying array to a slot.
-  inline FeedbackVectorSlot ToSlot(int index) const;
+  static inline FeedbackVectorSlot ToSlot(int index);
   inline Object* Get(FeedbackVectorSlot slot) const;
   inline void Set(FeedbackVectorSlot slot, Object* value,
                   WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
 
   // Returns slot kind for given slot.
-  inline FeedbackVectorSlotKind GetKind(FeedbackVectorSlot slot) const;
+  FeedbackVectorSlotKind GetKind(FeedbackVectorSlot slot) const;
+  // Returns name corresponding to given slot or an empty string.
+  String* GetName(FeedbackVectorSlot slot) const;
 
   static Handle<TypeFeedbackVector> New(Isolate* isolate,
                                         Handle<TypeFeedbackMetadata> metadata);
@@ -280,23 +336,17 @@
  public:
   explicit TypeFeedbackMetadataIterator(Handle<TypeFeedbackMetadata> metadata)
       : metadata_handle_(metadata),
-        slot_(FeedbackVectorSlot(0)),
+        next_slot_(FeedbackVectorSlot(0)),
         slot_kind_(FeedbackVectorSlotKind::INVALID) {}
 
   explicit TypeFeedbackMetadataIterator(TypeFeedbackMetadata* metadata)
       : metadata_(metadata),
-        slot_(FeedbackVectorSlot(0)),
+        next_slot_(FeedbackVectorSlot(0)),
         slot_kind_(FeedbackVectorSlotKind::INVALID) {}
 
-  bool HasNext() const { return slot_.ToInt() < metadata()->slot_count(); }
+  inline bool HasNext() const;
 
-  FeedbackVectorSlot Next() {
-    DCHECK(HasNext());
-    FeedbackVectorSlot slot = slot_;
-    slot_kind_ = metadata()->GetKind(slot);
-    slot_ = FeedbackVectorSlot(slot_.ToInt() + entry_size());
-    return slot;
-  }
+  inline FeedbackVectorSlot Next();
 
   // Returns slot kind of the last slot returned by Next().
   FeedbackVectorSlotKind kind() const {
@@ -306,7 +356,12 @@
   }
 
   // Returns entry size of the last slot returned by Next().
-  int entry_size() const { return TypeFeedbackMetadata::GetSlotSize(kind()); }
+  inline int entry_size() const;
+
+  String* name() const {
+    DCHECK(TypeFeedbackMetadata::SlotRequiresName(kind()));
+    return metadata()->GetName(cur_slot_);
+  }
 
  private:
   TypeFeedbackMetadata* metadata() const {
@@ -318,7 +373,8 @@
   // pointer use cases.
   Handle<TypeFeedbackMetadata> metadata_handle_;
   TypeFeedbackMetadata* metadata_;
-  FeedbackVectorSlot slot_;
+  FeedbackVectorSlot cur_slot_;
+  FeedbackVectorSlot next_slot_;
   FeedbackVectorSlotKind slot_kind_;
 };
 
@@ -393,10 +449,6 @@
 
 class CallICNexus final : public FeedbackNexus {
  public:
-  // Monomorphic call ics store call counts. Platform code needs to increment
-  // the count appropriately (ie, by 2).
-  static const int kCallCountIncrement = 2;
-
   CallICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
       : FeedbackNexus(vector, slot) {
     DCHECK_EQ(FeedbackVectorSlotKind::CALL_IC, vector->GetKind(slot));
@@ -454,6 +506,37 @@
   InlineCacheState StateFromFeedback() const override;
 };
 
+class LoadGlobalICNexus : public FeedbackNexus {
+ public:
+  LoadGlobalICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC, vector->GetKind(slot));
+  }
+  LoadGlobalICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC, vector->GetKind(slot));
+  }
+
+  int ExtractMaps(MapHandleList* maps) const final {
+    // LoadGlobalICs don't record map feedback.
+    return 0;
+  }
+  MaybeHandle<Code> FindHandlerForMap(Handle<Map> map) const final {
+    return MaybeHandle<Code>();
+  }
+  bool FindHandlers(CodeHandleList* code_list, int length = -1) const final {
+    return length == 0;
+  }
+
+  void ConfigureMegamorphic() override { UNREACHABLE(); }
+  void Clear(Code* host);
+
+  void ConfigureUninitialized() override;
+  void ConfigurePropertyCellMode(Handle<PropertyCell> cell);
+  void ConfigureHandlerMode(Handle<Code> handler);
+
+  InlineCacheState StateFromFeedback() const override;
+};
 
 class KeyedLoadICNexus : public FeedbackNexus {
  public:
diff --git a/src/type-info.cc b/src/type-info.cc
index 87b727e..5f5c1e8 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -108,7 +108,7 @@
 
 bool TypeFeedbackOracle::CallIsUninitialized(FeedbackVectorSlot slot) {
   Handle<Object> value = GetInfo(slot);
-  return value->IsUndefined() ||
+  return value->IsUndefined(isolate()) ||
          value.is_identical_to(
              TypeFeedbackVector::UninitializedSentinel(isolate()));
 }
diff --git a/src/types.cc b/src/types.cc
index c222861..a48736b 100644
--- a/src/types.cc
+++ b/src/types.cc
@@ -201,6 +201,8 @@
     case SIMD128_VALUE_TYPE:
       return kSimd;
     case JS_OBJECT_TYPE:
+    case JS_ARGUMENTS_TYPE:
+    case JS_ERROR_TYPE:
     case JS_GLOBAL_OBJECT_TYPE:
     case JS_GLOBAL_PROXY_TYPE:
     case JS_API_OBJECT_TYPE:
diff --git a/src/types.h b/src/types.h
index 8061410..2541838 100644
--- a/src/types.h
+++ b/src/types.h
@@ -219,6 +219,8 @@
   V(BooleanOrNullOrUndefined, kBoolean | kNull | kUndefined) \
   V(NullOrUndefined,          kNull | kUndefined) \
   V(Undetectable,             kNullOrUndefined | kOtherUndetectable) \
+  V(NumberOrOddball,          kNumber | kNullOrUndefined | kBoolean) \
+  V(NumberOrSimdOrString,     kNumber | kSimd | kString) \
   V(NumberOrString,           kNumber | kString) \
   V(NumberOrUndefined,        kNumber | kUndefined) \
   V(PlainPrimitive,           kNumberOrString | kBoolean | kNullOrUndefined) \
@@ -229,6 +231,7 @@
   V(StringOrReceiver,         kString | kReceiver) \
   V(Unique,                   kBoolean | kUniqueName | kNull | kUndefined | \
                               kReceiver) \
+  V(NonInternal,              kPrimitive | kReceiver) \
   V(NonNumber,                kUnique | kString | kInternal) \
   V(Any,                      0xfffffffeu)
 
@@ -740,8 +743,8 @@
   SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
 #undef CONSTRUCT_SIMD_TYPE
 
-  static Type* Union(Type* type1, Type* type2, Zone* reg);
-  static Type* Intersect(Type* type1, Type* type2, Zone* reg);
+  static Type* Union(Type* type1, Type* type2, Zone* zone);
+  static Type* Intersect(Type* type1, Type* type2, Zone* zone);
 
   static Type* Of(double value, Zone* zone) {
     return BitsetType::New(BitsetType::ExpandInternals(BitsetType::Lub(value)));
diff --git a/src/typing-asm.cc b/src/typing-asm.cc
index e541539..2390e7e 100644
--- a/src/typing-asm.cc
+++ b/src/typing-asm.cc
@@ -53,10 +53,10 @@
   stdlib_simd_##name##_types_(zone),
       SIMD128_TYPES(V)
 #undef V
-          global_variable_type_(HashMap::PointersMatch,
+          global_variable_type_(base::HashMap::PointersMatch,
                                 ZoneHashMap::kDefaultHashMapCapacity,
                                 ZoneAllocationPolicy(zone)),
-      local_variable_type_(HashMap::PointersMatch,
+      local_variable_type_(base::HashMap::PointersMatch,
                            ZoneHashMap::kDefaultHashMapCapacity,
                            ZoneAllocationPolicy(zone)),
       in_function_(false),
@@ -166,6 +166,10 @@
   // Set function type so global references to functions have some type
   // (so they can give a more useful error).
   Variable* var = decl->proxy()->var();
+  if (GetVariableInfo(var)) {
+    // Detect previously-seen functions.
+    FAIL(decl->fun(), "function repeated in module");
+  }
   SetType(var, Type::Function());
 }
 
@@ -508,7 +512,7 @@
   RECURSE(VisitStatements(expr->body()));
   in_function_ = false;
   return_type_ = save_return_type;
-  IntersectResult(expr, type);
+  RECURSE(IntersectResult(expr, type));
 }
 
 
@@ -552,7 +556,7 @@
     FAIL(expr, "then and else expressions in ? must have the same type");
   }
 
-  IntersectResult(expr, then_type);
+  RECURSE(IntersectResult(expr, then_type));
 }
 
 
@@ -579,7 +583,7 @@
   Type* type = Type::Intersect(info->type, expected_type_, zone());
   if (type->Is(cache_.kAsmInt)) type = cache_.kAsmInt;
   intish_ = 0;
-  IntersectResult(expr, type);
+  RECURSE(IntersectResult(expr, type));
 }
 
 void AsmTyper::VisitLiteral(Literal* expr, bool is_return) {
@@ -589,22 +593,22 @@
     int32_t i;
     uint32_t u;
     if (expr->raw_value()->ContainsDot()) {
-      IntersectResult(expr, cache_.kAsmDouble);
+      RECURSE(IntersectResult(expr, cache_.kAsmDouble));
     } else if (!is_return && value->ToUint32(&u)) {
       if (u <= 0x7fffffff) {
-        IntersectResult(expr, cache_.kAsmFixnum);
+        RECURSE(IntersectResult(expr, cache_.kAsmFixnum));
       } else {
-        IntersectResult(expr, cache_.kAsmUnsigned);
+        RECURSE(IntersectResult(expr, cache_.kAsmUnsigned));
       }
     } else if (value->ToInt32(&i)) {
-      IntersectResult(expr, cache_.kAsmSigned);
+      RECURSE(IntersectResult(expr, cache_.kAsmSigned));
     } else {
       FAIL(expr, "illegal number");
     }
   } else if (!is_return && value->IsString()) {
-    IntersectResult(expr, Type::String());
-  } else if (value->IsUndefined()) {
-    IntersectResult(expr, Type::Undefined());
+    RECURSE(IntersectResult(expr, Type::String()));
+  } else if (value->IsUndefined(isolate_)) {
+    RECURSE(IntersectResult(expr, Type::Undefined()));
   } else {
     FAIL(expr, "illegal literal");
   }
@@ -633,7 +637,7 @@
       FAIL(prop->value(), "non-function in function table");
     }
   }
-  IntersectResult(expr, Type::Object());
+  RECURSE(IntersectResult(expr, Type::Object()));
 }
 
 
@@ -653,7 +657,7 @@
     elem_type = Type::Union(elem_type, computed_type_, zone());
   }
   array_size_ = values->length();
-  IntersectResult(expr, Type::Array(elem_type, zone()));
+  RECURSE(IntersectResult(expr, Type::Array(elem_type, zone())));
 }
 
 
@@ -682,6 +686,9 @@
     if (intish_ != 0) {
       FAIL(expr, "intish or floatish assignment");
     }
+    if (in_function_ && target_type->IsArray()) {
+      FAIL(expr, "assignment to array variable");
+    }
     expected_type_ = target_type;
     Variable* var = proxy->var();
     VariableInfo* info = GetVariableInfo(var);
@@ -701,7 +708,7 @@
     if (type->Is(cache_.kAsmInt)) type = cache_.kAsmInt;
     info->type = type;
     intish_ = 0;
-    IntersectResult(proxy, type);
+    RECURSE(IntersectResult(proxy, type));
   } else if (expr->target()->IsProperty()) {
     // Assignment to a property: should be a heap assignment {H[x] = y}.
     int32_t value_intish = intish_;
@@ -716,7 +723,7 @@
     }
     VisitHeapAccess(property, true, target_type);
   }
-  IntersectResult(expr, target_type);
+  RECURSE(IntersectResult(expr, target_type));
 }
 
 
@@ -776,7 +783,7 @@
     // bin->set_bounds(Bounds(cache_.kAsmSigned));
     RECURSE(VisitWithExpectation(expr->key(), cache_.kAsmSigned,
                                  "must be integer"));
-    IntersectResult(expr, type);
+    RECURSE(IntersectResult(expr, type));
   } else {
     Literal* literal = expr->key()->AsLiteral();
     if (literal) {
@@ -835,8 +842,8 @@
         FAIL(expr, "illegal type in assignment");
       }
     } else {
-      IntersectResult(expr, expected_type_);
-      IntersectResult(expr, result_type);
+      RECURSE(IntersectResult(expr, expected_type_));
+      RECURSE(IntersectResult(expr, result_type));
     }
   }
 }
@@ -1034,7 +1041,7 @@
       intish_ = 0;
       bounds_.set(expr->expression(),
                   Bounds(Type::Function(Type::Any(), zone())));
-      IntersectResult(expr, expected_type);
+      RECURSE(IntersectResult(expr, expected_type));
     } else {
       if (fun_type->Arity() != args->length()) {
         FAIL(expr, "call with wrong arity");
@@ -1051,7 +1058,7 @@
       }
       RECURSE(CheckPolymorphicStdlibArguments(standard_member, args));
       intish_ = 0;
-      IntersectResult(expr, result_type);
+      RECURSE(IntersectResult(expr, result_type));
     }
   } else {
     FAIL(expr, "invalid callee");
@@ -1076,7 +1083,7 @@
           arg, fun_type->Parameter(i),
           "constructor argument expected to match callee parameter"));
     }
-    IntersectResult(expr, fun_type->Result());
+    RECURSE(IntersectResult(expr, fun_type->Result()));
     return;
   }
 
@@ -1097,7 +1104,7 @@
     case Token::NOT:  // Used to encode != and !==
       RECURSE(VisitWithExpectation(expr->expression(), cache_.kAsmInt,
                                    "operand expected to be integer"));
-      IntersectResult(expr, cache_.kAsmSigned);
+      RECURSE(IntersectResult(expr, cache_.kAsmSigned));
       return;
     case Token::DELETE:
       FAIL(expr, "delete operator encountered");
@@ -1156,7 +1163,7 @@
       FAIL(expr, "ill-typed bitwise operation");
     }
   }
-  IntersectResult(expr, result_type);
+  RECURSE(IntersectResult(expr, result_type));
 }
 
 
@@ -1188,7 +1195,7 @@
                                    "left comma operand expected to be any"));
       RECURSE(VisitWithExpectation(expr->right(), Type::Any(),
                                    "right comma operand expected to be any"));
-      IntersectResult(expr, computed_type_);
+      RECURSE(IntersectResult(expr, computed_type_));
       return;
     }
     case Token::OR:
@@ -1217,7 +1224,7 @@
           bounds_.set(left, Bounds(cache_.kSingletonOne));
           RECURSE(VisitWithExpectation(expr->right(), cache_.kAsmIntQ,
                                        "not operator expects an integer"));
-          IntersectResult(expr, cache_.kAsmSigned);
+          RECURSE(IntersectResult(expr, cache_.kAsmSigned));
           return;
         } else {
           FAIL(left, "unexpected false");
@@ -1279,7 +1286,7 @@
             FAIL(expr, "multiply must be by value in -2^20 < n < 2^20");
           }
           intish_ = i;
-          IntersectResult(expr, cache_.kAsmInt);
+          RECURSE(IntersectResult(expr, cache_.kAsmInt));
           return;
         } else {
           intish_ = left_intish + right_intish + 1;
@@ -1292,7 +1299,7 @@
               FAIL(expr, "too many consecutive multiplicative ops");
             }
           }
-          IntersectResult(expr, cache_.kAsmInt);
+          RECURSE(IntersectResult(expr, cache_.kAsmInt));
           return;
         }
       } else if (expr->op() == Token::MUL && expr->right()->IsLiteral() &&
@@ -1318,7 +1325,7 @@
                 "unary + only allowed on signed, unsigned, float?, or double?");
           }
         }
-        IntersectResult(expr, cache_.kAsmDouble);
+        RECURSE(IntersectResult(expr, cache_.kAsmDouble));
         return;
       } else if (expr->op() == Token::MUL && left_type->Is(cache_.kAsmDouble) &&
                  expr->right()->IsLiteral() &&
@@ -1326,17 +1333,17 @@
                  expr->right()->AsLiteral()->raw_value()->AsNumber() == -1.0) {
         // For unary -, expressed as x * -1
         bounds_.set(expr->right(), Bounds(cache_.kAsmDouble));
-        IntersectResult(expr, cache_.kAsmDouble);
+        RECURSE(IntersectResult(expr, cache_.kAsmDouble));
         return;
       } else if (type->Is(cache_.kAsmFloat) && expr->op() != Token::MOD) {
         if (left_intish != 0 || right_intish != 0) {
           FAIL(expr, "float operation before required fround");
         }
-        IntersectResult(expr, cache_.kAsmFloat);
+        RECURSE(IntersectResult(expr, cache_.kAsmFloat));
         intish_ = 1;
         return;
       } else if (type->Is(cache_.kAsmDouble)) {
-        IntersectResult(expr, cache_.kAsmDouble);
+        RECURSE(IntersectResult(expr, cache_.kAsmDouble));
         return;
       } else {
         FAIL(expr, "ill-typed arithmetic operation");
@@ -1378,7 +1385,7 @@
     FAIL(expr, "left and right side of comparison must match");
   }
 
-  IntersectResult(expr, cache_.kAsmSigned);
+  RECURSE(IntersectResult(expr, cache_.kAsmSigned));
 }
 
 
@@ -1598,6 +1605,15 @@
 void AsmTyper::IntersectResult(Expression* expr, Type* type) {
   computed_type_ = type;
   Type* bounded_type = Type::Intersect(computed_type_, expected_type_, zone());
+  if (Type::Representation(bounded_type, zone())->Is(Type::None())) {
+#ifdef DEBUG
+    PrintF("Computed type: ");
+    computed_type_->Print();
+    PrintF("Expected type: ");
+    expected_type_->Print();
+#endif
+    FAIL(expr, "type mismatch");
+  }
   bounds_.set(expr, Bounds(bounded_type));
 }
 
@@ -1608,7 +1624,7 @@
   expected_type_ = expected_type;
   RECURSE(Visit(expr));
   Type* bounded_type = Type::Intersect(computed_type_, expected_type_, zone());
-  if (bounded_type->Is(Type::None())) {
+  if (Type::Representation(bounded_type, zone())->Is(Type::None())) {
 #ifdef DEBUG
     PrintF("Computed type: ");
     computed_type_->Print();
diff --git a/src/uri.cc b/src/uri.cc
index c459be5..0107721 100644
--- a/src/uri.cc
+++ b/src/uri.cc
@@ -8,10 +8,197 @@
 #include "src/handles.h"
 #include "src/isolate-inl.h"
 #include "src/list.h"
+#include "src/string-search.h"
 
 namespace v8 {
 namespace internal {
 
+namespace {  // anonymous namespace for DecodeURI helper functions
+bool IsReservedPredicate(uc16 c) {
+  switch (c) {
+    case '#':
+    case '$':
+    case '&':
+    case '+':
+    case ',':
+    case '/':
+    case ':':
+    case ';':
+    case '=':
+    case '?':
+    case '@':
+      return true;
+    default:
+      return false;
+  }
+}
+
+bool IsReplacementCharacter(const uint8_t* octets, int length) {
+  // The replacement character is at codepoint U+FFFD in the Unicode Specials
+  // table. Its UTF-8 encoding is 0xEF 0xBF 0xBD.
+  if (length != 3 || octets[0] != 0xef || octets[1] != 0xbf ||
+      octets[2] != 0xbd) {
+    return false;
+  }
+  return true;
+}
+
+bool DecodeOctets(const uint8_t* octets, int length, List<uc16>* buffer) {
+  size_t cursor = 0;
+  uc32 value = unibrow::Utf8::ValueOf(octets, length, &cursor);
+  if (value == unibrow::Utf8::kBadChar &&
+      !IsReplacementCharacter(octets, length)) {
+    return false;
+  }
+
+  if (value <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+    buffer->Add(value);
+  } else {
+    buffer->Add(unibrow::Utf16::LeadSurrogate(value));
+    buffer->Add(unibrow::Utf16::TrailSurrogate(value));
+  }
+  return true;
+}
+
+int TwoDigitHex(uc16 character1, uc16 character2) {
+  if (character1 > 'f') return -1;
+  int high = HexValue(character1);
+  if (high == -1) return -1;
+  if (character2 > 'f') return -1;
+  int low = HexValue(character2);
+  if (low == -1) return -1;
+  return (high << 4) + low;
+}
+
+template <typename T>
+void AddToBuffer(uc16 decoded, String::FlatContent* uri_content, int index,
+                 bool is_uri, List<T>* buffer) {
+  if (is_uri && IsReservedPredicate(decoded)) {
+    buffer->Add('%');
+    uc16 first = uri_content->Get(index + 1);
+    uc16 second = uri_content->Get(index + 2);
+    DCHECK_GT(std::numeric_limits<T>::max(), first);
+    DCHECK_GT(std::numeric_limits<T>::max(), second);
+
+    buffer->Add(first);
+    buffer->Add(second);
+  } else {
+    buffer->Add(decoded);
+  }
+}
+
+bool IntoTwoByte(int index, bool is_uri, int uri_length,
+                 String::FlatContent* uri_content, List<uc16>* buffer) {
+  for (int k = index; k < uri_length; k++) {
+    uc16 code = uri_content->Get(k);
+    if (code == '%') {
+      uc16 decoded;
+      if (k + 2 >= uri_length ||
+          (decoded = TwoDigitHex(uri_content->Get(k + 1),
+                                 uri_content->Get(k + 2))) < 0) {
+        return false;
+      }
+      k += 2;
+      if (decoded > unibrow::Utf8::kMaxOneByteChar) {
+        uint8_t octets[unibrow::Utf8::kMaxEncodedSize];
+        octets[0] = decoded;
+
+        int number_of_continuation_bytes = 0;
+        while ((decoded << ++number_of_continuation_bytes) & 0x80) {
+          if (number_of_continuation_bytes > 3 || k + 3 >= uri_length) {
+            return false;
+          }
+
+          uc16 continuation_byte;
+
+          if (uri_content->Get(++k) != '%' ||
+              (continuation_byte = TwoDigitHex(uri_content->Get(k + 1),
+                                               uri_content->Get(k + 2))) < 0) {
+            return false;
+          }
+          k += 2;
+          octets[number_of_continuation_bytes] = continuation_byte;
+        }
+
+        if (!DecodeOctets(octets, number_of_continuation_bytes, buffer)) {
+          return false;
+        }
+      } else {
+        AddToBuffer(decoded, uri_content, k - 2, is_uri, buffer);
+      }
+    } else {
+      buffer->Add(code);
+    }
+  }
+  return true;
+}
+
+bool IntoOneAndTwoByte(Handle<String> uri, bool is_uri,
+                       List<uint8_t>* one_byte_buffer,
+                       List<uc16>* two_byte_buffer) {
+  DisallowHeapAllocation no_gc;
+  String::FlatContent uri_content = uri->GetFlatContent();
+
+  int uri_length = uri->length();
+  for (int k = 0; k < uri_length; k++) {
+    uc16 code = uri_content.Get(k);
+    if (code == '%') {
+      uc16 decoded;
+      if (k + 2 >= uri_length ||
+          (decoded = TwoDigitHex(uri_content.Get(k + 1),
+                                 uri_content.Get(k + 2))) < 0) {
+        return false;
+      }
+
+      if (decoded > unibrow::Utf8::kMaxOneByteChar) {
+        return IntoTwoByte(k, is_uri, uri_length, &uri_content,
+                           two_byte_buffer);
+      }
+
+      AddToBuffer(decoded, &uri_content, k, is_uri, one_byte_buffer);
+      k += 2;
+    } else {
+      if (code > unibrow::Utf8::kMaxOneByteChar) {
+        return IntoTwoByte(k, is_uri, uri_length, &uri_content,
+                           two_byte_buffer);
+      }
+      one_byte_buffer->Add(code);
+    }
+  }
+  return true;
+}
+
+}  // anonymous namespace
+
+MaybeHandle<String> Uri::Decode(Isolate* isolate, Handle<String> uri,
+                                bool is_uri) {
+  uri = String::Flatten(uri);
+  List<uint8_t> one_byte_buffer;
+  List<uc16> two_byte_buffer;
+
+  if (!IntoOneAndTwoByte(uri, is_uri, &one_byte_buffer, &two_byte_buffer)) {
+    THROW_NEW_ERROR(isolate, NewURIError(), String);
+  }
+
+  if (two_byte_buffer.is_empty()) {
+    return isolate->factory()->NewStringFromOneByte(
+        one_byte_buffer.ToConstVector());
+  }
+
+  Handle<SeqTwoByteString> result;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, result, isolate->factory()->NewRawTwoByteString(
+                           one_byte_buffer.length() + two_byte_buffer.length()),
+      String);
+
+  CopyChars(result->GetChars(), one_byte_buffer.ToConstVector().start(),
+            one_byte_buffer.length());
+  CopyChars(result->GetChars() + one_byte_buffer.length(),
+            two_byte_buffer.ToConstVector().start(), two_byte_buffer.length());
+
+  return result;
+}
+
 namespace {  // anonymous namespace for EncodeURI helper functions
 bool IsUnescapePredicateInUriComponent(uc16 c) {
   if (IsAlphaNumeric(c)) {
@@ -53,43 +240,36 @@
   }
 }
 
-void AddHexEncodedToBuffer(uint8_t octet, List<uint8_t>* buffer) {
+void AddEncodedOctetToBuffer(uint8_t octet, List<uint8_t>* buffer) {
   buffer->Add('%');
   buffer->Add(HexCharOfValue(octet >> 4));
   buffer->Add(HexCharOfValue(octet & 0x0F));
 }
 
 void EncodeSingle(uc16 c, List<uint8_t>* buffer) {
-  uint8_t x = (c >> 12) & 0xF;
-  uint8_t y = (c >> 6) & 63;
-  uint8_t z = c & 63;
-  if (c <= 0x007F) {
-    AddHexEncodedToBuffer(c, buffer);
-  } else if (c <= 0x07FF) {
-    AddHexEncodedToBuffer(y + 192, buffer);
-    AddHexEncodedToBuffer(z + 128, buffer);
-  } else {
-    AddHexEncodedToBuffer(x + 224, buffer);
-    AddHexEncodedToBuffer(y + 128, buffer);
-    AddHexEncodedToBuffer(z + 128, buffer);
+  char s[4] = {};
+  int number_of_bytes;
+  number_of_bytes =
+      unibrow::Utf8::Encode(s, c, unibrow::Utf16::kNoPreviousCharacter, false);
+  for (int k = 0; k < number_of_bytes; k++) {
+    AddEncodedOctetToBuffer(s[k], buffer);
   }
 }
 
 void EncodePair(uc16 cc1, uc16 cc2, List<uint8_t>* buffer) {
-  uint8_t u = ((cc1 >> 6) & 0xF) + 1;
-  uint8_t w = (cc1 >> 2) & 0xF;
-  uint8_t x = cc1 & 3;
-  uint8_t y = (cc2 >> 6) & 0xF;
-  uint8_t z = cc2 & 63;
-  AddHexEncodedToBuffer((u >> 2) + 240, buffer);
-  AddHexEncodedToBuffer((((u & 3) << 4) | w) + 128, buffer);
-  AddHexEncodedToBuffer(((x << 4) | y) + 128, buffer);
-  AddHexEncodedToBuffer(z + 128, buffer);
+  char s[4] = {};
+  int number_of_bytes =
+      unibrow::Utf8::Encode(s, unibrow::Utf16::CombineSurrogatePair(cc1, cc2),
+                            unibrow::Utf16::kNoPreviousCharacter, false);
+  for (int k = 0; k < number_of_bytes; k++) {
+    AddEncodedOctetToBuffer(s[k], buffer);
+  }
 }
 
 }  // anonymous namespace
 
-Object* Uri::Encode(Isolate* isolate, Handle<String> uri, bool is_uri) {
+MaybeHandle<String> Uri::Encode(Isolate* isolate, Handle<String> uri,
+                                bool is_uri) {
   uri = String::Flatten(uri);
   int uri_length = uri->length();
   List<uint8_t> buffer(uri_length);
@@ -120,15 +300,205 @@
       }
 
       AllowHeapAllocation allocate_error_and_return;
-      THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewURIError());
+      THROW_NEW_ERROR(isolate, NewURIError(), String);
     }
   }
 
+  return isolate->factory()->NewStringFromOneByte(buffer.ToConstVector());
+}
+
+namespace {  // Anonymous namespace for Escape and Unescape
+
+template <typename Char>
+int UnescapeChar(Vector<const Char> vector, int i, int length, int* step) {
+  uint16_t character = vector[i];
+  int32_t hi = 0;
+  int32_t lo = 0;
+  if (character == '%' && i <= length - 6 && vector[i + 1] == 'u' &&
+      (hi = TwoDigitHex(vector[i + 2], vector[i + 3])) > -1 &&
+      (lo = TwoDigitHex(vector[i + 4], vector[i + 5])) > -1) {
+    *step = 6;
+    return (hi << 8) + lo;
+  } else if (character == '%' && i <= length - 3 &&
+             (lo = TwoDigitHex(vector[i + 1], vector[i + 2])) > -1) {
+    *step = 3;
+    return lo;
+  } else {
+    *step = 1;
+    return character;
+  }
+}
+
+template <typename Char>
+MaybeHandle<String> UnescapeSlow(Isolate* isolate, Handle<String> string,
+                                 int start_index) {
+  bool one_byte = true;
+  int length = string->length();
+
+  int unescaped_length = 0;
+  {
+    DisallowHeapAllocation no_allocation;
+    Vector<const Char> vector = string->GetCharVector<Char>();
+    for (int i = start_index; i < length; unescaped_length++) {
+      int step;
+      if (UnescapeChar(vector, i, length, &step) >
+          String::kMaxOneByteCharCode) {
+        one_byte = false;
+      }
+      i += step;
+    }
+  }
+
+  DCHECK(start_index < length);
+  Handle<String> first_part =
+      isolate->factory()->NewProperSubString(string, 0, start_index);
+
+  int dest_position = 0;
+  Handle<String> second_part;
+  DCHECK(unescaped_length <= String::kMaxLength);
+  if (one_byte) {
+    Handle<SeqOneByteString> dest = isolate->factory()
+                                        ->NewRawOneByteString(unescaped_length)
+                                        .ToHandleChecked();
+    DisallowHeapAllocation no_allocation;
+    Vector<const Char> vector = string->GetCharVector<Char>();
+    for (int i = start_index; i < length; dest_position++) {
+      int step;
+      dest->SeqOneByteStringSet(dest_position,
+                                UnescapeChar(vector, i, length, &step));
+      i += step;
+    }
+    second_part = dest;
+  } else {
+    Handle<SeqTwoByteString> dest = isolate->factory()
+                                        ->NewRawTwoByteString(unescaped_length)
+                                        .ToHandleChecked();
+    DisallowHeapAllocation no_allocation;
+    Vector<const Char> vector = string->GetCharVector<Char>();
+    for (int i = start_index; i < length; dest_position++) {
+      int step;
+      dest->SeqTwoByteStringSet(dest_position,
+                                UnescapeChar(vector, i, length, &step));
+      i += step;
+    }
+    second_part = dest;
+  }
+  return isolate->factory()->NewConsString(first_part, second_part);
+}
+
+bool IsNotEscaped(uint16_t c) {
+  if (IsAlphaNumeric(c)) {
+    return true;
+  }
+  //  @*_+-./
+  switch (c) {
+    case '@':
+    case '*':
+    case '_':
+    case '+':
+    case '-':
+    case '.':
+    case '/':
+      return true;
+    default:
+      return false;
+  }
+}
+
+template <typename Char>
+static MaybeHandle<String> UnescapePrivate(Isolate* isolate,
+                                           Handle<String> source) {
+  int index;
+  {
+    DisallowHeapAllocation no_allocation;
+    StringSearch<uint8_t, Char> search(isolate, STATIC_CHAR_VECTOR("%"));
+    index = search.Search(source->GetCharVector<Char>(), 0);
+    if (index < 0) return source;
+  }
+  return UnescapeSlow<Char>(isolate, source, index);
+}
+
+template <typename Char>
+static MaybeHandle<String> EscapePrivate(Isolate* isolate,
+                                         Handle<String> string) {
+  DCHECK(string->IsFlat());
+  int escaped_length = 0;
+  int length = string->length();
+
+  {
+    DisallowHeapAllocation no_allocation;
+    Vector<const Char> vector = string->GetCharVector<Char>();
+    for (int i = 0; i < length; i++) {
+      uint16_t c = vector[i];
+      if (c >= 256) {
+        escaped_length += 6;
+      } else if (IsNotEscaped(c)) {
+        escaped_length++;
+      } else {
+        escaped_length += 3;
+      }
+
+      // We don't allow strings that are longer than a maximal length.
+      DCHECK(String::kMaxLength < 0x7fffffff - 6);     // Cannot overflow.
+      if (escaped_length > String::kMaxLength) break;  // Provoke exception.
+    }
+  }
+
+  // No length change implies no change.  Return original string if no change.
+  if (escaped_length == length) return string;
+
+  Handle<SeqOneByteString> dest;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, dest, isolate->factory()->NewRawOneByteString(escaped_length),
+      String);
+  int dest_position = 0;
+
+  {
+    DisallowHeapAllocation no_allocation;
+    Vector<const Char> vector = string->GetCharVector<Char>();
+    for (int i = 0; i < length; i++) {
+      uint16_t c = vector[i];
+      if (c >= 256) {
+        dest->SeqOneByteStringSet(dest_position, '%');
+        dest->SeqOneByteStringSet(dest_position + 1, 'u');
+        dest->SeqOneByteStringSet(dest_position + 2, HexCharOfValue(c >> 12));
+        dest->SeqOneByteStringSet(dest_position + 3,
+                                  HexCharOfValue((c >> 8) & 0xf));
+        dest->SeqOneByteStringSet(dest_position + 4,
+                                  HexCharOfValue((c >> 4) & 0xf));
+        dest->SeqOneByteStringSet(dest_position + 5, HexCharOfValue(c & 0xf));
+        dest_position += 6;
+      } else if (IsNotEscaped(c)) {
+        dest->SeqOneByteStringSet(dest_position, c);
+        dest_position++;
+      } else {
+        dest->SeqOneByteStringSet(dest_position, '%');
+        dest->SeqOneByteStringSet(dest_position + 1, HexCharOfValue(c >> 4));
+        dest->SeqOneByteStringSet(dest_position + 2, HexCharOfValue(c & 0xf));
+        dest_position += 3;
+      }
+    }
+  }
+
+  return dest;
+}
+
+}  // Anonymous namespace
+
+MaybeHandle<String> Uri::Escape(Isolate* isolate, Handle<String> string) {
   Handle<String> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      isolate->factory()->NewStringFromOneByte(buffer.ToConstVector()));
-  return *result;
+  string = String::Flatten(string);
+  return string->IsOneByteRepresentationUnderneath()
+             ? EscapePrivate<uint8_t>(isolate, string)
+             : EscapePrivate<uc16>(isolate, string);
+}
+
+MaybeHandle<String> Uri::Unescape(Isolate* isolate, Handle<String> string) {
+  Handle<String> result;
+  string = String::Flatten(string);
+  return string->IsOneByteRepresentationUnderneath()
+             ? UnescapePrivate<uint8_t>(isolate, string)
+             : UnescapePrivate<uc16>(isolate, string);
 }
 
 }  // namespace internal
diff --git a/src/uri.h b/src/uri.h
index e41e8a2..dfa057f 100644
--- a/src/uri.h
+++ b/src/uri.h
@@ -13,23 +13,39 @@
 
 class Uri : public AllStatic {
  public:
-  static Object* EncodeUri(Isolate* isolate, Handle<String> uri) {
+  // ES6 section 18.2.6.2 decodeURI (encodedURI)
+  static MaybeHandle<String> DecodeUri(Isolate* isolate, Handle<String> uri) {
+    return Decode(isolate, uri, true);
+  }
+
+  // ES6 section 18.2.6.3 decodeURIComponent (encodedURIComponent)
+  static MaybeHandle<String> DecodeUriComponent(Isolate* isolate,
+                                                Handle<String> component) {
+    return Decode(isolate, component, false);
+  }
+
+  // ES6 section 18.2.6.4 encodeURI (uri)
+  static MaybeHandle<String> EncodeUri(Isolate* isolate, Handle<String> uri) {
     return Encode(isolate, uri, true);
   }
 
-  static Object* EncodeUriComponent(Isolate* isolate,
-                                    Handle<String> component) {
+  // ES6 section 18.2.6.5 encodeURIComponent (uriComponent)
+  static MaybeHandle<String> EncodeUriComponent(Isolate* isolate,
+                                                Handle<String> component) {
     return Encode(isolate, component, false);
   }
 
-  // DecodeUri
-  // DecodeUriComponent
-  // escape
-  // unescape
+  // ES6 section B.2.1.1 escape (string)
+  static MaybeHandle<String> Escape(Isolate* isolate, Handle<String> string);
+
+  // ES6 section B.2.1.2 unescape (string)
+  static MaybeHandle<String> Unescape(Isolate* isolate, Handle<String> string);
 
  private:
-  static Object* Encode(Isolate* isolate, Handle<String> uri, bool is_uri);
-  // decode
+  static MaybeHandle<String> Decode(Isolate* isolate, Handle<String> uri,
+                                    bool is_uri);
+  static MaybeHandle<String> Encode(Isolate* isolate, Handle<String> uri,
+                                    bool is_uri);
 };
 
 }  // namespace internal
diff --git a/src/utils.cc b/src/utils.cc
index c46028f..16b5b7c 100644
--- a/src/utils.cc
+++ b/src/utils.cc
@@ -430,11 +430,8 @@
 
 bool DoubleToBoolean(double d) {
   // NaN, +0, and -0 should return the false object
-#if V8_TARGET_LITTLE_ENDIAN
-  union IeeeDoubleLittleEndianArchType u;
-#else
-  union IeeeDoubleBigEndianArchType u;
-#endif
+  IeeeDoubleArchType u;
+
   u.d = d;
   if (u.bits.exp == 2047) {
     // Detect NaN for IEEE double precision floating point.
diff --git a/src/utils.h b/src/utils.h
index 9a60141..9b94a2f 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -1524,6 +1524,10 @@
 #endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
 }
 
+static inline double ReadFloatValue(const void* p) {
+  return ReadUnalignedValue<float>(p);
+}
+
 static inline double ReadDoubleValue(const void* p) {
   return ReadUnalignedValue<double>(p);
 }
diff --git a/src/v8.cc b/src/v8.cc
index 154cf62..d660b58 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -14,9 +14,9 @@
 #include "src/elements.h"
 #include "src/frames.h"
 #include "src/isolate.h"
+#include "src/libsampler/v8-sampler.h"
 #include "src/objects.h"
 #include "src/profiler/heap-profiler.h"
-#include "src/profiler/sampler.h"
 #include "src/runtime-profiler.h"
 #include "src/snapshot/natives.h"
 #include "src/snapshot/snapshot.h"
@@ -45,10 +45,9 @@
   Bootstrapper::TearDownExtensions();
   ElementsAccessor::TearDown();
   LOperand::TearDownCaches();
-  ExternalReference::TearDownMathExpData();
   RegisteredExtension::UnregisterAll();
   Isolate::GlobalTearDown();
-  Sampler::TearDown();
+  sampler::Sampler::TearDown();
   FlagList::ResetAllFlags();  // Frees memory held by string arguments.
 }
 
@@ -76,7 +75,7 @@
 
   Isolate::InitializeOncePerProcess();
 
-  Sampler::SetUp();
+  sampler::Sampler::SetUp();
   CpuFeatures::Probe(false);
   ElementsAccessor::InitializeOncePerProcess();
   LOperand::SetUpCaches();
diff --git a/src/v8.gyp b/src/v8.gyp
index ef0c562..84c361e 100644
--- a/src/v8.gyp
+++ b/src/v8.gyp
@@ -27,7 +27,6 @@
 
 {
   'variables': {
-    'icu_use_data_file_flag%': 0,
     'v8_code': 1,
     'v8_random_seed%': 314159265,
     'v8_vector_stores%': 0,
@@ -381,14 +380,14 @@
       'type': 'static_library',
       'dependencies': [
         'v8_libbase',
+        'v8_libsampler',
       ],
       'variables': {
         'optimize': 'max',
       },
       'include_dirs+': [
         '..',
-        # To be able to find base/trace_event/common/trace_event_common.h
-        '../..',
+        '<(DEPTH)',
       ],
       'sources': [  ### gcmole(all) ###
         '../include/v8-debug.h',
@@ -412,6 +411,7 @@
         'api-experimental.h',
         'api.cc',
         'api.h',
+        'api-arguments-inl.h',
         'api-arguments.cc',
         'api-arguments.h',
         'api-natives.cc',
@@ -469,6 +469,7 @@
         'char-predicates-inl.h',
         'char-predicates.h',
         'checks.h',
+        'code-events.h',
         'code-factory.cc',
         'code-factory.h',
         'code-stub-assembler.cc',
@@ -504,8 +505,8 @@
         'compiler/bytecode-graph-builder.cc',
         'compiler/bytecode-graph-builder.h',
         'compiler/c-linkage.cc',
-        'compiler/coalesced-live-ranges.cc',
-        'compiler/coalesced-live-ranges.h',
+        'compiler/checkpoint-elimination.cc',
+        'compiler/checkpoint-elimination.h',
         'compiler/code-generator-impl.h',
         'compiler/code-generator.cc',
         'compiler/code-generator.h',
@@ -550,8 +551,6 @@
         'compiler/graph-visualizer.h',
         'compiler/graph.cc',
         'compiler/graph.h',
-        'compiler/greedy-allocator.cc',
-        'compiler/greedy-allocator.h',
         'compiler/instruction-codes.h',
         'compiler/instruction-selector-impl.h',
         'compiler/instruction-selector.cc',
@@ -625,6 +624,8 @@
         'compiler/node.h',
         'compiler/opcodes.cc',
         'compiler/opcodes.h',
+        'compiler/operation-typer.cc',
+        'compiler/operation-typer.h',
         'compiler/operator-properties.cc',
         'compiler/operator-properties.h',
         'compiler/operator.cc',
@@ -637,6 +638,8 @@
         'compiler/pipeline-statistics.h',
         'compiler/raw-machine-assembler.cc',
         'compiler/raw-machine-assembler.h',
+        'compiler/redundancy-elimination.cc',
+        'compiler/redundancy-elimination.h',
         'compiler/register-allocator.cc',
         'compiler/register-allocator.h',
         'compiler/register-allocator-verifier.cc',
@@ -659,6 +662,8 @@
         'compiler/source-position.h',
         'compiler/state-values-utils.cc',
         'compiler/state-values-utils.h',
+        'compiler/store-store-elimination.cc',
+        'compiler/store-store-elimination.h',
         'compiler/tail-call-optimization.cc',
         'compiler/tail-call-optimization.h',
         'compiler/type-hint-analyzer.cc',
@@ -778,6 +783,8 @@
         'dtoa.cc',
         'dtoa.h',
         'effects.h',
+        'eh-frame.cc',
+        'eh-frame.h',
         'elements-kind.cc',
         'elements-kind.h',
         'elements.cc',
@@ -828,8 +835,8 @@
         'handles-inl.h',
         'handles.cc',
         'handles.h',
-        'hashmap.h',
         'heap-symbols.h',
+        'heap/array-buffer-tracker-inl.h',
         'heap/array-buffer-tracker.cc',
         'heap/array-buffer-tracker.h',
         'heap/memory-reducer.cc',
@@ -897,14 +904,19 @@
         'interpreter/bytecode-array-iterator.h',
         'interpreter/bytecode-array-writer.cc',
         'interpreter/bytecode-array-writer.h',
+        'interpreter/bytecode-dead-code-optimizer.cc',
+        'interpreter/bytecode-dead-code-optimizer.h',
+        'interpreter/bytecode-label.h',
+        'interpreter/bytecode-generator.cc',
+        'interpreter/bytecode-generator.h',
         'interpreter/bytecode-peephole-optimizer.cc',
         'interpreter/bytecode-peephole-optimizer.h',
         'interpreter/bytecode-pipeline.cc',
         'interpreter/bytecode-pipeline.h',
         'interpreter/bytecode-register-allocator.cc',
         'interpreter/bytecode-register-allocator.h',
-        'interpreter/bytecode-generator.cc',
-        'interpreter/bytecode-generator.h',
+        'interpreter/bytecode-register-optimizer.cc',
+        'interpreter/bytecode-register-optimizer.h',
         'interpreter/bytecode-traits.h',
         'interpreter/constant-array-builder.cc',
         'interpreter/constant-array-builder.h',
@@ -923,7 +935,9 @@
         'isolate-inl.h',
         'isolate.cc',
         'isolate.h',
+        'json-parser.cc',
         'json-parser.h',
+        'json-stringifier.cc',
         'json-stringifier.h',
         'keys.h',
         'keys.cc',
@@ -996,11 +1010,11 @@
         'profiler/heap-snapshot-generator-inl.h',
         'profiler/heap-snapshot-generator.cc',
         'profiler/heap-snapshot-generator.h',
+        'profiler/profiler-listener.cc',
+        'profiler/profiler-listener.h',
         'profiler/profile-generator-inl.h',
         'profiler/profile-generator.cc',
         'profiler/profile-generator.h',
-        'profiler/sampler.cc',
-        'profiler/sampler.h',
         'profiler/sampling-heap-profiler.cc',
         'profiler/sampling-heap-profiler.h',
         'profiler/strings-storage.cc',
@@ -1052,7 +1066,6 @@
         'runtime/runtime-i18n.cc',
         'runtime/runtime-internal.cc',
         'runtime/runtime-interpreter.cc',
-        'runtime/runtime-json.cc',
         'runtime/runtime-literals.cc',
         'runtime/runtime-liveedit.cc',
         'runtime/runtime-maths.cc',
@@ -1067,7 +1080,6 @@
         'runtime/runtime-symbol.cc',
         'runtime/runtime-test.cc',
         'runtime/runtime-typedarray.cc',
-        'runtime/runtime-uri.cc',
         'runtime/runtime-utils.h',
         'runtime/runtime.cc',
         'runtime/runtime.h',
@@ -1146,6 +1158,8 @@
         'version.h',
         'vm-state-inl.h',
         'vm-state.h',
+        'wasm/asm-types.cc',
+        'wasm/asm-types.h',
         'wasm/asm-wasm-builder.cc',
         'wasm/asm-wasm-builder.h',
         'wasm/ast-decoder.cc',
@@ -1158,6 +1172,8 @@
         'wasm/module-decoder.h',
         'wasm/switch-logic.h',
         'wasm/switch-logic.cc',
+        'wasm/wasm-debug.cc',
+        'wasm/wasm-debug.h',
         'wasm/wasm-external-refs.cc',
         'wasm/wasm-external-refs.h',
         'wasm/wasm-function-name-table.cc',
@@ -1167,6 +1183,8 @@
         'wasm/wasm-macro-gen.h',
         'wasm/wasm-module.cc',
         'wasm/wasm-module.h',
+        'wasm/wasm-interpreter.cc',
+        'wasm/wasm-interpreter.h',
         'wasm/wasm-opcodes.cc',
         'wasm/wasm-opcodes.h',
         'wasm/wasm-result.cc',
@@ -1175,8 +1193,6 @@
         'zone.h',
         'zone-allocator.h',
         'zone-containers.h',
-        'third_party/fdlibm/fdlibm.cc',
-        'third_party/fdlibm/fdlibm.h',
       ],
       'conditions': [
         ['want_separate_host_toolset==1', {
@@ -1666,10 +1682,15 @@
         'base/cpu.h',
         'base/division-by-constant.cc',
         'base/division-by-constant.h',
+        'base/file-utils.cc',
+        'base/file-utils.h',
         'base/flags.h',
         'base/format-macros.h',
         'base/functional.cc',
         'base/functional.h',
+        'base/hashmap.h',
+        'base/ieee754.cc',
+        'base/ieee754.h',
         'base/iterator.h',
         'base/lazy-instance.h',
         'base/logging.cc',
@@ -1936,6 +1957,36 @@
       },
     },
     {
+      'target_name': 'v8_libsampler',
+      'type': 'static_library',
+      'variables': {
+        'optimize': 'max',
+      },
+      'dependencies': [
+        'v8_libbase',
+      ],
+      'include_dirs+': [
+        '..',
+        '../include',
+      ],
+      'sources': [
+        'libsampler/v8-sampler.cc',
+        'libsampler/v8-sampler.h'
+      ],
+      'conditions': [
+        ['want_separate_host_toolset==1', {
+          'toolsets': ['host', 'target'],
+        }, {
+          'toolsets': ['target'],
+        }],
+      ],
+      'direct_dependent_settings': {
+        'include_dirs': [
+          '../include',
+        ],
+      },
+    },
+    {
       'target_name': 'natives_blob',
       'type': 'none',
       'conditions': [
@@ -2013,7 +2064,6 @@
           'js/symbol.js',
           'js/array.js',
           'js/string.js',
-          'js/uri.js',
           'js/math.js',
           'third_party/fdlibm/fdlibm.js',
           'js/regexp.js',
@@ -2025,7 +2075,6 @@
           'js/collection-iterator.js',
           'js/promise.js',
           'js/messages.js',
-          'js/json.js',
           'js/array-iterator.js',
           'js/string-iterator.js',
           'js/templates.js',
@@ -2039,11 +2088,8 @@
           'js/macros.py',
           'messages.h',
           'js/harmony-atomics.js',
-          'js/harmony-regexp-exec.js',
           'js/harmony-sharedarraybuffer.js',
           'js/harmony-simd.js',
-          'js/harmony-species.js',
-          'js/harmony-unicode-regexps.js',
           'js/harmony-string-padding.js',
           'js/promise-extra.js',
           'js/harmony-async-await.js'
diff --git a/src/v8memory.h b/src/v8memory.h
index b1ae939..d34bce7 100644
--- a/src/v8memory.h
+++ b/src/v8memory.h
@@ -64,6 +64,13 @@
   static Handle<Object>& Object_Handle_at(Address addr)  {
     return *reinterpret_cast<Handle<Object>*>(addr);
   }
+
+  static bool IsAddressInRange(Address base, Address address, uint32_t size) {
+    uintptr_t numeric_base = reinterpret_cast<uintptr_t>(base);
+    uintptr_t numeric_address = reinterpret_cast<uintptr_t>(address);
+    return numeric_base <= numeric_address &&
+           numeric_address < numeric_base + size;
+  }
 };
 
 }  // namespace internal
diff --git a/src/wasm/asm-types.cc b/src/wasm/asm-types.cc
new file mode 100644
index 0000000..e5588ae
--- /dev/null
+++ b/src/wasm/asm-types.cc
@@ -0,0 +1,319 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/wasm/asm-types.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+AsmCallableType* AsmType::AsCallableType() {
+  if (AsValueType() != nullptr) {
+    return nullptr;
+  }
+
+  DCHECK(this->AsFunctionType() != nullptr ||
+         this->AsOverloadedFunctionType() != nullptr ||
+         this->AsFFIType() != nullptr ||
+         this->AsFunctionTableType() != nullptr);
+  return reinterpret_cast<AsmCallableType*>(this);
+}
+
+std::string AsmType::Name() {
+  AsmValueType* avt = this->AsValueType();
+  if (avt != nullptr) {
+    switch (avt->Bitset()) {
+#define RETURN_TYPE_NAME(CamelName, string_name, number, parent_types) \
+  case AsmValueType::kAsm##CamelName:                                  \
+    return string_name;
+      FOR_EACH_ASM_VALUE_TYPE_LIST(RETURN_TYPE_NAME)
+#undef RETURN_TYPE_NAME
+      default:
+        UNREACHABLE();
+    }
+  }
+
+  return this->AsCallableType()->Name();
+}
+
+bool AsmType::IsExactly(AsmType* that) {
+  // TODO(jpp): maybe this can become this == that.
+  AsmValueType* avt = this->AsValueType();
+  if (avt != nullptr) {
+    AsmValueType* tavt = that->AsValueType();
+    if (tavt == nullptr) {
+      return false;
+    }
+    return avt->Bitset() == tavt->Bitset();
+  }
+
+  // TODO(jpp): is it useful to allow non-value types to be tested with
+  // IsExactly?
+  return that == this;
+}
+
+bool AsmType::IsA(AsmType* that) {
+  // IsA is used for querying inheritance relationships. Therefore it is only
+  // meaningful for basic types.
+  AsmValueType* tavt = that->AsValueType();
+  if (tavt != nullptr) {
+    AsmValueType* avt = this->AsValueType();
+    if (avt == nullptr) {
+      return false;
+    }
+    return (avt->Bitset() & tavt->Bitset()) == tavt->Bitset();
+  }
+
+  // TODO(jpp): is it useful to allow non-value types to be tested with IsA?
+  return that == this;
+}
+
+int32_t AsmType::ElementSizeInBytes() {
+  auto* value = AsValueType();
+  if (value == nullptr) {
+    return AsmType::kNotHeapType;
+  }
+  switch (value->Bitset()) {
+    case AsmValueType::kAsmInt8Array:
+    case AsmValueType::kAsmUint8Array:
+      return 1;
+    case AsmValueType::kAsmInt16Array:
+    case AsmValueType::kAsmUint16Array:
+      return 2;
+    case AsmValueType::kAsmInt32Array:
+    case AsmValueType::kAsmUint32Array:
+    case AsmValueType::kAsmFloat32Array:
+      return 4;
+    case AsmValueType::kAsmFloat64Array:
+      return 8;
+    default:
+      return AsmType::kNotHeapType;
+  }
+}
+
+AsmType* AsmType::LoadType() {
+  auto* value = AsValueType();
+  if (value == nullptr) {
+    return AsmType::None();
+  }
+  switch (value->Bitset()) {
+    case AsmValueType::kAsmInt8Array:
+    case AsmValueType::kAsmUint8Array:
+    case AsmValueType::kAsmInt16Array:
+    case AsmValueType::kAsmUint16Array:
+    case AsmValueType::kAsmInt32Array:
+    case AsmValueType::kAsmUint32Array:
+      return AsmType::Intish();
+    case AsmValueType::kAsmFloat32Array:
+      return AsmType::FloatQ();
+    case AsmValueType::kAsmFloat64Array:
+      return AsmType::DoubleQ();
+    default:
+      return AsmType::None();
+  }
+}
+
+AsmType* AsmType::StoreType() {
+  auto* value = AsValueType();
+  if (value == nullptr) {
+    return AsmType::None();
+  }
+  switch (value->Bitset()) {
+    case AsmValueType::kAsmInt8Array:
+    case AsmValueType::kAsmUint8Array:
+    case AsmValueType::kAsmInt16Array:
+    case AsmValueType::kAsmUint16Array:
+    case AsmValueType::kAsmInt32Array:
+    case AsmValueType::kAsmUint32Array:
+      return AsmType::Intish();
+    case AsmValueType::kAsmFloat32Array:
+      return AsmType::FloatishDoubleQ();
+    case AsmValueType::kAsmFloat64Array:
+      return AsmType::FloatQDoubleQ();
+    default:
+      return AsmType::None();
+  }
+}
+
+std::string AsmFunctionType::Name() {
+  if (IsFroundType()) {
+    return "fround";
+  }
+
+  std::string ret;
+  ret += "(";
+  for (size_t ii = 0; ii < args_.size(); ++ii) {
+    ret += args_[ii]->Name();
+    if (ii != args_.size() - 1) {
+      ret += ", ";
+    }
+  }
+  if (IsMinMaxType()) {
+    DCHECK_EQ(args_.size(), 2);
+    ret += "...";
+  }
+  ret += ") -> ";
+  ret += return_type_->Name();
+  return ret;
+}
+
+namespace {
+class AsmFroundType final : public AsmFunctionType {
+ public:
+  bool IsFroundType() const override { return true; }
+
+ private:
+  friend AsmType;
+
+  explicit AsmFroundType(Zone* zone)
+      : AsmFunctionType(zone, AsmType::Float()) {}
+
+  AsmType* ValidateCall(AsmType* return_type,
+                        const ZoneVector<AsmType*>& args) override;
+};
+}  // namespace
+
+AsmType* AsmType::FroundType(Zone* zone) {
+  auto* Fround = new (zone) AsmFroundType(zone);
+  return reinterpret_cast<AsmType*>(Fround);
+}
+
+AsmType* AsmFroundType::ValidateCall(AsmType* return_type,
+                                     const ZoneVector<AsmType*>& args) {
+  if (args.size() != 1) {
+    return AsmType::None();
+  }
+
+  auto* arg = args[0];
+  if (!arg->IsA(AsmType::Floatish()) && !arg->IsA(AsmType::DoubleQ()) &&
+      !arg->IsA(AsmType::Signed()) && !arg->IsA(AsmType::Unsigned())) {
+    return AsmType::None();
+  }
+
+  return AsmType::Float();
+}
+
+namespace {
+class AsmMinMaxType final : public AsmFunctionType {
+ public:
+  bool IsMinMaxType() const override { return true; }
+
+ private:
+  friend AsmType;
+
+  AsmMinMaxType(Zone* zone, AsmType* dest, AsmType* src)
+      : AsmFunctionType(zone, dest) {
+    AddArgument(src);
+    AddArgument(src);
+  }
+
+  AsmType* ValidateCall(AsmType* return_type,
+                        const ZoneVector<AsmType*>& args) override {
+    if (!ReturnType()->IsExactly(return_type)) {
+      return AsmType::None();
+    }
+
+    if (args.size() < 2) {
+      return AsmType::None();
+    }
+
+    for (size_t ii = 0; ii < Arguments().size(); ++ii) {
+      if (!Arguments()[0]->IsExactly(args[ii])) {
+        return AsmType::None();
+      }
+    }
+
+    return ReturnType();
+  }
+};
+}  // namespace
+
+AsmType* AsmType::MinMaxType(Zone* zone, AsmType* dest, AsmType* src) {
+  DCHECK(dest->AsValueType() != nullptr);
+  DCHECK(src->AsValueType() != nullptr);
+  auto* MinMax = new (zone) AsmMinMaxType(zone, dest, src);
+  return reinterpret_cast<AsmType*>(MinMax);
+}
+
+AsmType* AsmFFIType::ValidateCall(AsmType* return_type,
+                                  const ZoneVector<AsmType*>& args) {
+  for (size_t ii = 0; ii < args.size(); ++ii) {
+    if (!args[ii]->IsA(AsmType::Extern())) {
+      return AsmType::None();
+    }
+  }
+
+  return return_type;
+}
+
+AsmType* AsmFunctionType::ValidateCall(AsmType* return_type,
+                                       const ZoneVector<AsmType*>& args) {
+  if (!return_type_->IsExactly(return_type)) {
+    return AsmType::None();
+  }
+
+  if (args_.size() != args.size()) {
+    return AsmType::None();
+  }
+
+  for (size_t ii = 0; ii < args_.size(); ++ii) {
+    if (!args_[ii]->IsExactly(args[ii])) {
+      return AsmType::None();
+    }
+  }
+
+  return return_type_;
+}
+
+std::string AsmOverloadedFunctionType::Name() {
+  std::string ret;
+
+  for (size_t ii = 0; ii < overloads_.size(); ++ii) {
+    if (ii != 0) {
+      ret += " /\\ ";
+    }
+    ret += overloads_[ii]->Name();
+  }
+
+  return ret;
+}
+
+AsmType* AsmOverloadedFunctionType::ValidateCall(
+    AsmType* return_type, const ZoneVector<AsmType*>& args) {
+  for (size_t ii = 0; ii < overloads_.size(); ++ii) {
+    auto* validated_type =
+        overloads_[ii]->AsCallableType()->ValidateCall(return_type, args);
+    if (validated_type != AsmType::None()) {
+      return validated_type;
+    }
+  }
+
+  return AsmType::None();
+}
+
+void AsmOverloadedFunctionType::AddOverload(AsmType* overload) {
+  DCHECK(overload->AsFunctionType() != nullptr);
+  overloads_.push_back(overload);
+}
+
+AsmFunctionTableType::AsmFunctionTableType(size_t length, AsmType* signature)
+    : length_(length), signature_(signature) {
+  DCHECK(signature_ != nullptr);
+  DCHECK(signature_->AsFunctionType() != nullptr);
+}
+
+std::string AsmFunctionTableType::Name() {
+  return signature_->Name() + "[" + std::to_string(length_) + "]";
+}
+
+AsmType* AsmFunctionTableType::ValidateCall(AsmType* return_type,
+                                            const ZoneVector<AsmType*>& args) {
+  return signature_->AsCallableType()->ValidateCall(return_type, args);
+}
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
diff --git a/src/wasm/asm-types.h b/src/wasm/asm-types.h
new file mode 100644
index 0000000..a102fc8
--- /dev/null
+++ b/src/wasm/asm-types.h
@@ -0,0 +1,344 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SRC_WASM_ASM_TYPES_H_
+#define SRC_WASM_ASM_TYPES_H_
+
+#include <string>
+
+#include "src/base/macros.h"
+#include "src/zone-containers.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class AsmType;
+class AsmFFIType;
+class AsmFunctionType;
+class AsmOverloadedFunctionType;
+class AsmFunctionTableType;
+
+// List of V(CamelName, string_name, number, parent_types)
+#define FOR_EACH_ASM_VALUE_TYPE_LIST(V)                                       \
+  /* These tags are not types that are expressable in the asm source. They */ \
+  /* are used to express semantic information about the types they tag.    */ \
+  V(Heap, "[]", 1, 0)                                                         \
+  /*The following are actual types that appear in the asm source. */          \
+  V(Void, "void", 2, 0)                                                       \
+  V(Extern, "extern", 3, 0)                                                   \
+  V(DoubleQ, "double?", 4, 0)                                                 \
+  V(Double, "double", 5, kAsmDoubleQ | kAsmExtern)                            \
+  V(Intish, "intish", 6, 0)                                                   \
+  V(Int, "int", 7, kAsmIntish)                                                \
+  V(Signed, "signed", 8, kAsmInt | kAsmExtern)                                \
+  V(Unsigned, "unsigned", 9, kAsmInt)                                         \
+  V(FixNum, "fixnum", 10, kAsmSigned | kAsmUnsigned)                          \
+  V(Floatish, "floatish", 11, 0)                                              \
+  V(FloatQ, "float?", 12, kAsmFloatish)                                       \
+  V(Float, "float", 13, kAsmFloatQ)                                           \
+  /* Types used for expressing the Heap accesses. */                          \
+  V(Uint8Array, "Uint8Array", 14, kAsmHeap)                                   \
+  V(Int8Array, "Int8Array", 15, kAsmHeap)                                     \
+  V(Uint16Array, "Uint16Array", 16, kAsmHeap)                                 \
+  V(Int16Array, "Int16Array", 17, kAsmHeap)                                   \
+  V(Uint32Array, "Uint32Array", 18, kAsmHeap)                                 \
+  V(Int32Array, "Int32Array", 19, kAsmHeap)                                   \
+  V(Float32Array, "Float32Array", 20, kAsmHeap)                               \
+  V(Float64Array, "Float64Array", 21, kAsmHeap)                               \
+  /* Pseudo-types used in representing heap access for fp types.*/            \
+  V(FloatishDoubleQ, "floatish|double?", 22, kAsmFloatish | kAsmDoubleQ)      \
+  V(FloatQDoubleQ, "float?|double?", 23, kAsmFloatQ | kAsmDoubleQ)            \
+  /* None is used to represent errors in the type checker. */                 \
+  V(None, "<none>", 31, 0)
+
+// List of V(CamelName)
+#define FOR_EACH_ASM_CALLABLE_TYPE_LIST(V) \
+  V(FunctionType)                          \
+  V(FFIType)                               \
+  V(OverloadedFunctionType)                \
+  V(FunctionTableType)
+
+class AsmValueType {
+ public:
+  typedef uint32_t bitset_t;
+
+  enum : uint32_t {
+#define DEFINE_TAG(CamelName, string_name, number, parent_types) \
+  kAsm##CamelName = ((1u << (number)) | (parent_types)),
+    FOR_EACH_ASM_VALUE_TYPE_LIST(DEFINE_TAG)
+#undef DEFINE_TAG
+        kAsmUnknown = 0,
+    kAsmValueTypeTag = 1u
+  };
+
+ private:
+  friend class AsmType;
+
+  static AsmValueType* AsValueType(AsmType* type) {
+    if ((reinterpret_cast<uintptr_t>(type) & kAsmValueTypeTag) ==
+        kAsmValueTypeTag) {
+      return reinterpret_cast<AsmValueType*>(type);
+    }
+    return nullptr;
+  }
+
+  bitset_t Bitset() const {
+    DCHECK((reinterpret_cast<uintptr_t>(this) & kAsmValueTypeTag) ==
+           kAsmValueTypeTag);
+    return static_cast<bitset_t>(reinterpret_cast<uintptr_t>(this) &
+                                 ~kAsmValueTypeTag);
+  }
+
+  static AsmType* New(bitset_t bits) {
+    DCHECK_EQ((bits & kAsmValueTypeTag), 0);
+    return reinterpret_cast<AsmType*>(
+        static_cast<uintptr_t>(bits | kAsmValueTypeTag));
+  }
+
+  // AsmValueTypes can't be created except through AsmValueType::New.
+  DISALLOW_IMPLICIT_CONSTRUCTORS(AsmValueType);
+};
+
+class AsmCallableType : public ZoneObject {
+ public:
+  virtual std::string Name() = 0;
+  virtual AsmType* ValidateCall(AsmType* return_type,
+                                const ZoneVector<AsmType*>& args) = 0;
+
+#define DECLARE_CAST(CamelName) \
+  virtual Asm##CamelName* As##CamelName() { return nullptr; }
+  FOR_EACH_ASM_CALLABLE_TYPE_LIST(DECLARE_CAST)
+#undef DECLARE_CAST
+
+ protected:
+  AsmCallableType() = default;
+  virtual ~AsmCallableType() = default;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(AsmCallableType);
+};
+
+class AsmFunctionType : public AsmCallableType {
+ public:
+  AsmFunctionType* AsFunctionType() final { return this; }
+
+  void AddArgument(AsmType* type) { args_.push_back(type); }
+  const ZoneVector<AsmType*> Arguments() const { return args_; }
+  AsmType* ReturnType() const { return return_type_; }
+
+  virtual bool IsMinMaxType() const { return false; }
+  virtual bool IsFroundType() const { return false; }
+
+ protected:
+  AsmFunctionType(Zone* zone, AsmType* return_type)
+      : return_type_(return_type), args_(zone) {}
+
+ private:
+  friend AsmType;
+
+  std::string Name() override;
+  AsmType* ValidateCall(AsmType* return_type,
+                        const ZoneVector<AsmType*>& args) override;
+
+  AsmType* return_type_;
+  ZoneVector<AsmType*> args_;
+
+  DISALLOW_COPY_AND_ASSIGN(AsmFunctionType);
+};
+
+class AsmOverloadedFunctionType final : public AsmCallableType {
+ public:
+  AsmOverloadedFunctionType* AsOverloadedFunctionType() override {
+    return this;
+  }
+
+  void AddOverload(AsmType* overload);
+
+ private:
+  friend AsmType;
+
+  explicit AsmOverloadedFunctionType(Zone* zone) : overloads_(zone) {}
+
+  std::string Name() override;
+  AsmType* ValidateCall(AsmType* return_type,
+                        const ZoneVector<AsmType*>& args) override;
+
+  ZoneVector<AsmType*> overloads_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(AsmOverloadedFunctionType);
+};
+
+class AsmFFIType final : public AsmCallableType {
+ public:
+  AsmFFIType* AsFFIType() override { return this; }
+
+  std::string Name() override { return "Function"; }
+  AsmType* ValidateCall(AsmType* return_type,
+                        const ZoneVector<AsmType*>& args) override;
+
+ private:
+  friend AsmType;
+
+  AsmFFIType() = default;
+
+  DISALLOW_COPY_AND_ASSIGN(AsmFFIType);
+};
+
+class AsmFunctionTableType : public AsmCallableType {
+ public:
+  AsmFunctionTableType* AsFunctionTableType() override { return this; }
+
+  std::string Name() override;
+
+  AsmType* ValidateCall(AsmType* return_type,
+                        const ZoneVector<AsmType*>& args) override;
+
+  size_t length() const { return length_; }
+
+ private:
+  friend class AsmType;
+
+  AsmFunctionTableType(size_t length, AsmType* signature);
+
+  size_t length_;
+  AsmType* signature_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(AsmFunctionTableType);
+};
+
+class AsmType {
+ public:
+#define DEFINE_CONSTRUCTOR(CamelName, string_name, number, parent_types) \
+  static AsmType* CamelName() {                                          \
+    return AsmValueType::New(AsmValueType::kAsm##CamelName);             \
+  }
+  FOR_EACH_ASM_VALUE_TYPE_LIST(DEFINE_CONSTRUCTOR)
+#undef DEFINE_CONSTRUCTOR
+
+#define DEFINE_CAST(CamelCase)                                        \
+  Asm##CamelCase* As##CamelCase() {                                   \
+    if (AsValueType() != nullptr) {                                   \
+      return nullptr;                                                 \
+    }                                                                 \
+    return reinterpret_cast<AsmCallableType*>(this)->As##CamelCase(); \
+  }
+  FOR_EACH_ASM_CALLABLE_TYPE_LIST(DEFINE_CAST)
+#undef DEFINE_CAST
+  AsmValueType* AsValueType() { return AsmValueType::AsValueType(this); }
+  AsmCallableType* AsCallableType();
+
+  // A function returning ret. Callers still need to invoke AddArgument with the
+  // returned type to fully create this type.
+  static AsmType* Function(Zone* zone, AsmType* ret) {
+    AsmFunctionType* f = new (zone) AsmFunctionType(zone, ret);
+    return reinterpret_cast<AsmType*>(f);
+  }
+
+  // Overloaded function types. Not creatable by asm source, but useful to
+  // represent the overloaded stdlib functions.
+  static AsmType* OverloadedFunction(Zone* zone) {
+    auto* f = new (zone) AsmOverloadedFunctionType(zone);
+    return reinterpret_cast<AsmType*>(f);
+  }
+
+  // The type for fround(src).
+  static AsmType* FroundType(Zone* zone);
+
+  // The (variadic) type for min and max.
+  static AsmType* MinMaxType(Zone* zone, AsmType* dest, AsmType* src);
+
+  // The type for foreign functions.
+  static AsmType* FFIType(Zone* zone) {
+    auto* f = new (zone) AsmFFIType();
+    return reinterpret_cast<AsmType*>(f);
+  }
+
+  // The type for function tables.
+  static AsmType* FunctionTableType(Zone* zone, size_t length,
+                                    AsmType* signature) {
+    auto* f = new (zone) AsmFunctionTableType(length, signature);
+    return reinterpret_cast<AsmType*>(f);
+  }
+
+  std::string Name();
+  // IsExactly returns true if this is the exact same type as that. For
+  // non-value types (e.g., callables), this returns this == that.
+  bool IsExactly(AsmType* that);
+  // IsA is used to query whether this is an instance of that (i.e., if this is
+  // a type derived from that.) For non-value types (e.g., callables), this
+  // returns this == that.
+  bool IsA(AsmType* that);
+
+  // Types allowed in return statements. void is the type for returns without
+  // an expression.
+  bool IsReturnType() {
+    return this == AsmType::Void() || this == AsmType::Double() ||
+           this == AsmType::Signed() || this == AsmType::Float();
+  }
+
+  // Converts this to the corresponding valid argument type.
+  AsmType* ToReturnType() {
+    if (this->IsA(AsmType::Signed())) {
+      return AsmType::Signed();
+    }
+    if (this->IsA(AsmType::Double())) {
+      return AsmType::Double();
+    }
+    if (this->IsA(AsmType::Float())) {
+      return AsmType::Float();
+    }
+    if (this->IsA(AsmType::Void())) {
+      return AsmType::Void();
+    }
+    return AsmType::None();
+  }
+
+  // Types allowed to be parameters in asm functions.
+  bool IsParameterType() {
+    return this == AsmType::Double() || this == AsmType::Int() ||
+           this == AsmType::Float();
+  }
+
+  // Converts this to the corresponding valid argument type.
+  AsmType* ToParameterType() {
+    if (this->IsA(AsmType::Int())) {
+      return AsmType::Int();
+    }
+    if (this->IsA(AsmType::Double())) {
+      return AsmType::Double();
+    }
+    if (this->IsA(AsmType::Float())) {
+      return AsmType::Float();
+    }
+    return AsmType::None();
+  }
+
+  // Types allowed to be compared using the comparison operators.
+  bool IsComparableType() {
+    return this == AsmType::Double() || this == AsmType::Signed() ||
+           this == AsmType::Unsigned() || this == AsmType::Float();
+  }
+
+  // The following methods are meant to be used for inspecting the traits of
+  // element types for the heap view types.
+  enum : int32_t { kNotHeapType = -1 };
+
+  // Returns the element size if this is a heap type. Otherwise returns
+  // kNotHeapType.
+  int32_t ElementSizeInBytes();
+  // Returns the load type if this is a heap type. AsmType::None is returned if
+  // this is not a heap type.
+  AsmType* LoadType();
+  // Returns the store type if this is a heap type. AsmType::None is returned if
+  // this is not a heap type.
+  AsmType* StoreType();
+};
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
+
+#endif  // SRC_WASM_ASM_TYPES_H_
diff --git a/src/wasm/asm-wasm-builder.cc b/src/wasm/asm-wasm-builder.cc
index 325058c..958fd0c 100644
--- a/src/wasm/asm-wasm-builder.cc
+++ b/src/wasm/asm-wasm-builder.cc
@@ -33,16 +33,23 @@
 
 enum AsmScope { kModuleScope, kInitScope, kFuncScope, kExportScope };
 
+struct ForeignVariable {
+  Handle<Name> name;
+  Variable* var;
+  LocalType type;
+};
+
 class AsmWasmBuilderImpl : public AstVisitor {
  public:
   AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, FunctionLiteral* literal,
-                     Handle<Object> foreign, AsmTyper* typer)
-      : local_variables_(HashMap::PointersMatch,
+                     AsmTyper* typer)
+      : local_variables_(base::HashMap::PointersMatch,
                          ZoneHashMap::kDefaultHashMapCapacity,
                          ZoneAllocationPolicy(zone)),
-        functions_(HashMap::PointersMatch, ZoneHashMap::kDefaultHashMapCapacity,
+        functions_(base::HashMap::PointersMatch,
+                   ZoneHashMap::kDefaultHashMapCapacity,
                    ZoneAllocationPolicy(zone)),
-        global_variables_(HashMap::PointersMatch,
+        global_variables_(base::HashMap::PointersMatch,
                           ZoneHashMap::kDefaultHashMapCapacity,
                           ZoneAllocationPolicy(zone)),
         scope_(kModuleScope),
@@ -51,13 +58,14 @@
         literal_(literal),
         isolate_(isolate),
         zone_(zone),
-        foreign_(foreign),
         typer_(typer),
         cache_(TypeCache::Get()),
         breakable_blocks_(zone),
+        foreign_variables_(zone),
         init_function_index_(0),
+        foreign_init_function_index_(0),
         next_table_index_(0),
-        function_tables_(HashMap::PointersMatch,
+        function_tables_(base::HashMap::PointersMatch,
                          ZoneHashMap::kDefaultHashMapCapacity,
                          ZoneAllocationPolicy(zone)),
         imported_function_table_(this),
@@ -74,14 +82,48 @@
     current_function_builder_ = nullptr;
   }
 
-  void Compile() {
-    InitializeInitFunction();
-    RECURSE(VisitFunctionLiteral(literal_));
+  void BuildForeignInitFunction() {
+    foreign_init_function_index_ = builder_->AddFunction();
+    FunctionSig::Builder b(zone(), 0, foreign_variables_.size());
+    for (auto i = foreign_variables_.begin(); i != foreign_variables_.end();
+         ++i) {
+      b.AddParam(i->type);
+    }
+    current_function_builder_ =
+        builder_->FunctionAt(foreign_init_function_index_);
+    current_function_builder_->SetExported();
+    std::string raw_name = "__foreign_init__";
+    current_function_builder_->SetName(raw_name.data(),
+                                       static_cast<int>(raw_name.size()));
+    current_function_builder_->SetSignature(b.Build());
+    for (size_t pos = 0; pos < foreign_variables_.size(); ++pos) {
+      current_function_builder_->EmitGetLocal(static_cast<uint32_t>(pos));
+      ForeignVariable* fv = &foreign_variables_[pos];
+      uint32_t index = LookupOrInsertGlobal(fv->var, fv->type);
+      current_function_builder_->EmitWithVarInt(kExprStoreGlobal, index);
+    }
+    current_function_builder_ = nullptr;
   }
 
-  void VisitVariableDeclaration(VariableDeclaration* decl) {}
+  i::Handle<i::FixedArray> GetForeignArgs() {
+    i::Handle<FixedArray> ret = isolate_->factory()->NewFixedArray(
+        static_cast<int>(foreign_variables_.size()));
+    for (size_t i = 0; i < foreign_variables_.size(); ++i) {
+      ForeignVariable* fv = &foreign_variables_[i];
+      ret->set(static_cast<int>(i), *fv->name);
+    }
+    return ret;
+  }
 
-  void VisitFunctionDeclaration(FunctionDeclaration* decl) {
+  void Build() {
+    InitializeInitFunction();
+    RECURSE(VisitFunctionLiteral(literal_));
+    BuildForeignInitFunction();
+  }
+
+  void VisitVariableDeclaration(VariableDeclaration* decl) override {}
+
+  void VisitFunctionDeclaration(FunctionDeclaration* decl) override {
     DCHECK_EQ(kModuleScope, scope_);
     DCHECK_NULL(current_function_builder_);
     uint32_t index = LookupOrInsertFunction(decl->proxy()->var());
@@ -93,11 +135,11 @@
     local_variables_.Clear();
   }
 
-  void VisitImportDeclaration(ImportDeclaration* decl) {}
+  void VisitImportDeclaration(ImportDeclaration* decl) override {}
 
-  void VisitExportDeclaration(ExportDeclaration* decl) {}
+  void VisitExportDeclaration(ExportDeclaration* decl) override {}
 
-  void VisitStatements(ZoneList<Statement*>* stmts) {
+  void VisitStatements(ZoneList<Statement*>* stmts) override {
     for (int i = 0; i < stmts->length(); ++i) {
       Statement* stmt = stmts->at(i);
       ExpressionStatement* e = stmt->AsExpressionStatement();
@@ -109,7 +151,7 @@
     }
   }
 
-  void VisitBlock(Block* stmt) {
+  void VisitBlock(Block* stmt) override {
     if (stmt->statements()->length() == 1) {
       ExpressionStatement* expr =
           stmt->statements()->at(0)->AsExpressionStatement();
@@ -146,15 +188,17 @@
     }
   };
 
-  void VisitExpressionStatement(ExpressionStatement* stmt) {
+  void VisitExpressionStatement(ExpressionStatement* stmt) override {
     RECURSE(Visit(stmt->expression()));
   }
 
-  void VisitEmptyStatement(EmptyStatement* stmt) {}
+  void VisitEmptyStatement(EmptyStatement* stmt) override {}
 
-  void VisitEmptyParentheses(EmptyParentheses* paren) { UNREACHABLE(); }
+  void VisitEmptyParentheses(EmptyParentheses* paren) override {
+    UNREACHABLE();
+  }
 
-  void VisitIfStatement(IfStatement* stmt) {
+  void VisitIfStatement(IfStatement* stmt) override {
     DCHECK_EQ(kFuncScope, scope_);
     RECURSE(Visit(stmt->condition()));
     current_function_builder_->Emit(kExprIf);
@@ -171,7 +215,7 @@
     breakable_blocks_.pop_back();
   }
 
-  void VisitContinueStatement(ContinueStatement* stmt) {
+  void VisitContinueStatement(ContinueStatement* stmt) override {
     DCHECK_EQ(kFuncScope, scope_);
     DCHECK_NOT_NULL(stmt->target());
     int i = static_cast<int>(breakable_blocks_.size()) - 1;
@@ -192,7 +236,7 @@
     current_function_builder_->EmitVarInt(block_distance);
   }
 
-  void VisitBreakStatement(BreakStatement* stmt) {
+  void VisitBreakStatement(BreakStatement* stmt) override {
     DCHECK_EQ(kFuncScope, scope_);
     DCHECK_NOT_NULL(stmt->target());
     int i = static_cast<int>(breakable_blocks_.size()) - 1;
@@ -215,7 +259,7 @@
     current_function_builder_->EmitVarInt(block_distance);
   }
 
-  void VisitReturnStatement(ReturnStatement* stmt) {
+  void VisitReturnStatement(ReturnStatement* stmt) override {
     if (scope_ == kModuleScope) {
       scope_ = kExportScope;
       RECURSE(Visit(stmt->expression()));
@@ -230,7 +274,7 @@
     }
   }
 
-  void VisitWithStatement(WithStatement* stmt) { UNREACHABLE(); }
+  void VisitWithStatement(WithStatement* stmt) override { UNREACHABLE(); }
 
   void HandleCase(CaseNode* node,
                   const ZoneMap<int, unsigned int>& case_to_block,
@@ -298,7 +342,7 @@
     }
   }
 
-  void VisitSwitchStatement(SwitchStatement* stmt) {
+  void VisitSwitchStatement(SwitchStatement* stmt) override {
     VariableProxy* tag = stmt->tag()->AsVariableProxy();
     DCHECK_NOT_NULL(tag);
     ZoneList<CaseClause*>* clauses = stmt->cases();
@@ -341,7 +385,7 @@
         current_function_builder_->EmitVarInt(default_block);
       }
     }
-    for (int i = 0; i < case_count; i++) {
+    for (int i = 0; i < case_count; ++i) {
       CaseClause* clause = clauses->at(i);
       RECURSE(VisitStatements(clause->statements()));
       BlockVisitor* v = blocks.at(case_count - i - 1);
@@ -350,9 +394,9 @@
     }
   }
 
-  void VisitCaseClause(CaseClause* clause) { UNREACHABLE(); }
+  void VisitCaseClause(CaseClause* clause) override { UNREACHABLE(); }
 
-  void VisitDoWhileStatement(DoWhileStatement* stmt) {
+  void VisitDoWhileStatement(DoWhileStatement* stmt) override {
     DCHECK_EQ(kFuncScope, scope_);
     BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true);
     RECURSE(Visit(stmt->body()));
@@ -362,7 +406,7 @@
     current_function_builder_->Emit(kExprEnd);
   }
 
-  void VisitWhileStatement(WhileStatement* stmt) {
+  void VisitWhileStatement(WhileStatement* stmt) override {
     DCHECK_EQ(kFuncScope, scope_);
     BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true);
     RECURSE(Visit(stmt->cond()));
@@ -374,7 +418,7 @@
     breakable_blocks_.pop_back();
   }
 
-  void VisitForStatement(ForStatement* stmt) {
+  void VisitForStatement(ForStatement* stmt) override {
     DCHECK_EQ(kFuncScope, scope_);
     if (stmt->init() != nullptr) {
       RECURSE(Visit(stmt->init()));
@@ -398,17 +442,23 @@
     current_function_builder_->EmitWithU8U8(kExprBr, ARITY_0, 0);
   }
 
-  void VisitForInStatement(ForInStatement* stmt) { UNREACHABLE(); }
+  void VisitForInStatement(ForInStatement* stmt) override { UNREACHABLE(); }
 
-  void VisitForOfStatement(ForOfStatement* stmt) { UNREACHABLE(); }
+  void VisitForOfStatement(ForOfStatement* stmt) override { UNREACHABLE(); }
 
-  void VisitTryCatchStatement(TryCatchStatement* stmt) { UNREACHABLE(); }
+  void VisitTryCatchStatement(TryCatchStatement* stmt) override {
+    UNREACHABLE();
+  }
 
-  void VisitTryFinallyStatement(TryFinallyStatement* stmt) { UNREACHABLE(); }
+  void VisitTryFinallyStatement(TryFinallyStatement* stmt) override {
+    UNREACHABLE();
+  }
 
-  void VisitDebuggerStatement(DebuggerStatement* stmt) { UNREACHABLE(); }
+  void VisitDebuggerStatement(DebuggerStatement* stmt) override {
+    UNREACHABLE();
+  }
 
-  void VisitFunctionLiteral(FunctionLiteral* expr) {
+  void VisitFunctionLiteral(FunctionLiteral* expr) override {
     Scope* scope = expr->scope();
     if (scope_ == kFuncScope) {
       if (bounds_->get(expr).lower->IsFunction()) {
@@ -418,7 +468,7 @@
         FunctionSig::Builder b(zone(), return_type == kAstStmt ? 0 : 1,
                                func_type->Arity());
         if (return_type != kAstStmt) b.AddReturn(return_type);
-        for (int i = 0; i < expr->parameter_count(); i++) {
+        for (int i = 0; i < expr->parameter_count(); ++i) {
           LocalType type = TypeFrom(func_type->Parameter(i));
           DCHECK_NE(kAstStmt, type);
           b.AddParam(type);
@@ -433,11 +483,11 @@
     RECURSE(VisitDeclarations(scope->declarations()));
   }
 
-  void VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
+  void VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) override {
     UNREACHABLE();
   }
 
-  void VisitConditional(Conditional* expr) {
+  void VisitConditional(Conditional* expr) override {
     DCHECK_EQ(kFuncScope, scope_);
     RECURSE(Visit(expr->condition()));
     // WASM ifs come with implicit blocks for both arms.
@@ -502,7 +552,7 @@
     return true;
   }
 
-  void VisitVariableProxy(VariableProxy* expr) {
+  void VisitVariableProxy(VariableProxy* expr) override {
     if (scope_ == kFuncScope || scope_ == kInitScope) {
       Variable* var = expr->var();
       if (VisitStdlibConstant(var)) {
@@ -520,7 +570,7 @@
     }
   }
 
-  void VisitLiteral(Literal* expr) {
+  void VisitLiteral(Literal* expr) override {
     Handle<Object> value = expr->value();
     if (!value->IsNumber() || (scope_ != kFuncScope && scope_ != kInitScope)) {
       return;
@@ -550,9 +600,9 @@
     }
   }
 
-  void VisitRegExpLiteral(RegExpLiteral* expr) { UNREACHABLE(); }
+  void VisitRegExpLiteral(RegExpLiteral* expr) override { UNREACHABLE(); }
 
-  void VisitObjectLiteral(ObjectLiteral* expr) {
+  void VisitObjectLiteral(ObjectLiteral* expr) override {
     ZoneList<ObjectLiteralProperty*>* props = expr->properties();
     for (int i = 0; i < props->length(); ++i) {
       ObjectLiteralProperty* prop = props->at(i);
@@ -566,7 +616,7 @@
       const AstRawString* raw_name = name->AsRawPropertyName();
       if (var->is_function()) {
         uint32_t index = LookupOrInsertFunction(var);
-        builder_->FunctionAt(index)->Exported(1);
+        builder_->FunctionAt(index)->SetExported();
         builder_->FunctionAt(index)->SetName(
             reinterpret_cast<const char*>(raw_name->raw_data()),
             raw_name->length());
@@ -574,7 +624,7 @@
     }
   }
 
-  void VisitArrayLiteral(ArrayLiteral* expr) { UNREACHABLE(); }
+  void VisitArrayLiteral(ArrayLiteral* expr) override { UNREACHABLE(); }
 
   void LoadInitFunction() {
     current_function_builder_ = builder_->FunctionAt(init_function_index_);
@@ -595,13 +645,13 @@
     if (return_type != kAstStmt) {
       sig.AddReturn(static_cast<LocalType>(return_type));
     }
-    for (int i = 0; i < func_type->Arity(); i++) {
+    for (int i = 0; i < func_type->Arity(); ++i) {
       sig.AddParam(TypeFrom(func_type->Parameter(i)));
     }
     uint32_t signature_index = builder_->AddSignature(sig.Build());
     InsertFunctionTable(table->var(), next_table_index_, signature_index);
     next_table_index_ += funcs->values()->length();
-    for (int i = 0; i < funcs->values()->length(); i++) {
+    for (int i = 0; i < funcs->values()->length(); ++i) {
       VariableProxy* func = funcs->values()->at(i)->AsVariableProxy();
       DCHECK_NOT_NULL(func);
       builder_->AddIndirectFunction(LookupOrInsertFunction(func->var()));
@@ -646,7 +696,8 @@
 
    public:
     explicit ImportedFunctionTable(AsmWasmBuilderImpl* builder)
-        : table_(HashMap::PointersMatch, ZoneHashMap::kDefaultHashMapCapacity,
+        : table_(base::HashMap::PointersMatch,
+                 ZoneHashMap::kDefaultHashMapCapacity,
                  ZoneAllocationPolicy(builder->zone())),
           builder_(builder) {}
 
@@ -706,13 +757,17 @@
           DCHECK(binop->right()->IsLiteral());
           DCHECK_EQ(1.0, binop->right()->AsLiteral()->raw_value()->AsNumber());
           DCHECK(binop->right()->AsLiteral()->raw_value()->ContainsDot());
-          VisitForeignVariable(true, prop);
+          DCHECK(target->IsVariableProxy());
+          VisitForeignVariable(true, target->AsVariableProxy()->var(), prop);
+          *is_nop = true;
           return;
         } else if (binop->op() == Token::BIT_OR) {
           DCHECK(binop->right()->IsLiteral());
           DCHECK_EQ(0.0, binop->right()->AsLiteral()->raw_value()->AsNumber());
           DCHECK(!binop->right()->AsLiteral()->raw_value()->ContainsDot());
-          VisitForeignVariable(false, prop);
+          DCHECK(target->IsVariableProxy());
+          VisitForeignVariable(false, target->AsVariableProxy()->var(), prop);
+          *is_nop = true;
           return;
         } else {
           UNREACHABLE();
@@ -784,7 +839,7 @@
     }
   }
 
-  void VisitAssignment(Assignment* expr) {
+  void VisitAssignment(Assignment* expr) override {
     bool as_init = false;
     if (scope_ == kModuleScope) {
       Property* prop = expr->value()->AsProperty();
@@ -831,55 +886,22 @@
     if (as_init) UnLoadInitFunction();
   }
 
-  void VisitYield(Yield* expr) { UNREACHABLE(); }
+  void VisitYield(Yield* expr) override { UNREACHABLE(); }
 
-  void VisitThrow(Throw* expr) { UNREACHABLE(); }
+  void VisitThrow(Throw* expr) override { UNREACHABLE(); }
 
-  void VisitForeignVariable(bool is_float, Property* expr) {
+  void VisitForeignVariable(bool is_float, Variable* var, Property* expr) {
     DCHECK(expr->obj()->AsVariableProxy());
     DCHECK(VariableLocation::PARAMETER ==
            expr->obj()->AsVariableProxy()->var()->location());
     DCHECK_EQ(1, expr->obj()->AsVariableProxy()->var()->index());
     Literal* key_literal = expr->key()->AsLiteral();
     DCHECK_NOT_NULL(key_literal);
-    if (!key_literal->value().is_null() && !foreign_.is_null() &&
-        foreign_->IsObject()) {
+    if (!key_literal->value().is_null()) {
       Handle<Name> name =
           i::Object::ToName(isolate_, key_literal->value()).ToHandleChecked();
-      MaybeHandle<Object> maybe_value = i::Object::GetProperty(foreign_, name);
-      if (!maybe_value.is_null()) {
-        Handle<Object> value = maybe_value.ToHandleChecked();
-        if (is_float) {
-          MaybeHandle<Object> maybe_nvalue = i::Object::ToNumber(value);
-          if (!maybe_nvalue.is_null()) {
-            Handle<Object> nvalue = maybe_nvalue.ToHandleChecked();
-            if (nvalue->IsNumber()) {
-              double val = nvalue->Number();
-              byte code[] = {WASM_F64(val)};
-              current_function_builder_->EmitCode(code, sizeof(code));
-              return;
-            }
-          }
-        } else {
-          MaybeHandle<Object> maybe_nvalue =
-              i::Object::ToInt32(isolate_, value);
-          if (!maybe_nvalue.is_null()) {
-            Handle<Object> nvalue = maybe_nvalue.ToHandleChecked();
-            if (nvalue->IsNumber()) {
-              int32_t val = static_cast<int32_t>(nvalue->Number());
-              current_function_builder_->EmitI32Const(val);
-              return;
-            }
-          }
-        }
-      }
-    }
-    if (is_float) {
-      byte code[] = {WASM_F64(std::numeric_limits<double>::quiet_NaN())};
-      current_function_builder_->EmitCode(code, sizeof(code));
-    } else {
-      byte code[] = {WASM_I32V_1(0)};
-      current_function_builder_->EmitCode(code, sizeof(code));
+      LocalType type = is_float ? kAstF64 : kAstI32;
+      foreign_variables_.push_back({name, var, type});
     }
   }
 
@@ -954,7 +976,7 @@
     UNREACHABLE();
   }
 
-  void VisitProperty(Property* expr) {
+  void VisitProperty(Property* expr) override {
     MachineType type;
     VisitPropertyAndEmitIndex(expr, &type);
     WasmOpcode opcode;
@@ -1242,7 +1264,7 @@
     }
   }
 
-  void VisitCall(Call* expr) {
+  void VisitCall(Call* expr) override {
     Call::CallType call_type = expr->GetCallType(isolate_);
     switch (call_type) {
       case Call::OTHER_CALL: {
@@ -1264,7 +1286,7 @@
           if (return_type != kAstStmt) {
             sig.AddReturn(return_type);
           }
-          for (int i = 0; i < args->length(); i++) {
+          for (int i = 0; i < args->length(); ++i) {
             sig.AddParam(TypeOf(args->at(i)));
           }
           index =
@@ -1303,11 +1325,11 @@
     }
   }
 
-  void VisitCallNew(CallNew* expr) { UNREACHABLE(); }
+  void VisitCallNew(CallNew* expr) override { UNREACHABLE(); }
 
-  void VisitCallRuntime(CallRuntime* expr) { UNREACHABLE(); }
+  void VisitCallRuntime(CallRuntime* expr) override { UNREACHABLE(); }
 
-  void VisitUnaryOperation(UnaryOperation* expr) {
+  void VisitUnaryOperation(UnaryOperation* expr) override {
     RECURSE(Visit(expr->expression()));
     switch (expr->op()) {
       case Token::NOT: {
@@ -1320,7 +1342,7 @@
     }
   }
 
-  void VisitCountOperation(CountOperation* expr) { UNREACHABLE(); }
+  void VisitCountOperation(CountOperation* expr) override { UNREACHABLE(); }
 
   bool MatchIntBinaryOperation(BinaryOperation* expr, Token::Value op,
                                int32_t val) {
@@ -1457,7 +1479,7 @@
     }
   }
 
-  void VisitBinaryOperation(BinaryOperation* expr) {
+  void VisitBinaryOperation(BinaryOperation* expr) override {
     ConvertOperation convertOperation = MatchBinaryOperation(expr);
     if (convertOperation == kToDouble) {
       RECURSE(Visit(expr->left()));
@@ -1535,7 +1557,7 @@
     }
   }
 
-  void VisitCompareOperation(CompareOperation* expr) {
+  void VisitCompareOperation(CompareOperation* expr) override {
     RECURSE(Visit(expr->left()));
     RECURSE(Visit(expr->right()));
     switch (expr->op()) {
@@ -1606,32 +1628,37 @@
 #undef SIGNED
 #undef NON_SIGNED
 
-  void VisitThisFunction(ThisFunction* expr) { UNREACHABLE(); }
+  void VisitThisFunction(ThisFunction* expr) override { UNREACHABLE(); }
 
-  void VisitDeclarations(ZoneList<Declaration*>* decls) {
+  void VisitDeclarations(ZoneList<Declaration*>* decls) override {
     for (int i = 0; i < decls->length(); ++i) {
       Declaration* decl = decls->at(i);
       RECURSE(Visit(decl));
     }
   }
 
-  void VisitClassLiteral(ClassLiteral* expr) { UNREACHABLE(); }
+  void VisitClassLiteral(ClassLiteral* expr) override { UNREACHABLE(); }
 
-  void VisitSpread(Spread* expr) { UNREACHABLE(); }
+  void VisitSpread(Spread* expr) override { UNREACHABLE(); }
 
-  void VisitSuperPropertyReference(SuperPropertyReference* expr) {
+  void VisitSuperPropertyReference(SuperPropertyReference* expr) override {
     UNREACHABLE();
   }
 
-  void VisitSuperCallReference(SuperCallReference* expr) { UNREACHABLE(); }
-
-  void VisitSloppyBlockFunctionStatement(SloppyBlockFunctionStatement* expr) {
+  void VisitSuperCallReference(SuperCallReference* expr) override {
     UNREACHABLE();
   }
 
-  void VisitDoExpression(DoExpression* expr) { UNREACHABLE(); }
+  void VisitSloppyBlockFunctionStatement(
+      SloppyBlockFunctionStatement* expr) override {
+    UNREACHABLE();
+  }
 
-  void VisitRewritableExpression(RewritableExpression* expr) { UNREACHABLE(); }
+  void VisitDoExpression(DoExpression* expr) override { UNREACHABLE(); }
+
+  void VisitRewritableExpression(RewritableExpression* expr) override {
+    UNREACHABLE();
+  }
 
   struct IndexContainer : public ZoneObject {
     uint32_t index;
@@ -1724,11 +1751,12 @@
   FunctionLiteral* literal_;
   Isolate* isolate_;
   Zone* zone_;
-  Handle<Object> foreign_;
   AsmTyper* typer_;
   TypeCache const& cache_;
   ZoneVector<std::pair<BreakableStatement*, bool>> breakable_blocks_;
+  ZoneVector<ForeignVariable> foreign_variables_;
   uint32_t init_function_index_;
+  uint32_t foreign_init_function_index_;
   uint32_t next_table_index_;
   ZoneHashMap function_tables_;
   ImportedFunctionTable imported_function_table_;
@@ -1741,21 +1769,18 @@
 };
 
 AsmWasmBuilder::AsmWasmBuilder(Isolate* isolate, Zone* zone,
-                               FunctionLiteral* literal, Handle<Object> foreign,
-                               AsmTyper* typer)
-    : isolate_(isolate),
-      zone_(zone),
-      literal_(literal),
-      foreign_(foreign),
-      typer_(typer) {}
+                               FunctionLiteral* literal, AsmTyper* typer)
+    : isolate_(isolate), zone_(zone), literal_(literal), typer_(typer) {}
 
 // TODO(aseemgarg): probably should take zone (to write wasm to) as input so
 // that zone in constructor may be thrown away once wasm module is written.
-WasmModuleIndex* AsmWasmBuilder::Run() {
-  AsmWasmBuilderImpl impl(isolate_, zone_, literal_, foreign_, typer_);
-  impl.Compile();
-  WasmModuleWriter* writer = impl.builder_->Build(zone_);
-  return writer->WriteTo(zone_);
+ZoneBuffer* AsmWasmBuilder::Run(i::Handle<i::FixedArray>* foreign_args) {
+  AsmWasmBuilderImpl impl(isolate_, zone_, literal_, typer_);
+  impl.Build();
+  *foreign_args = impl.GetForeignArgs();
+  ZoneBuffer* buffer = new (zone_) ZoneBuffer(zone_);
+  impl.builder_->WriteTo(*buffer);
+  return buffer;
 }
 }  // namespace wasm
 }  // namespace internal
diff --git a/src/wasm/asm-wasm-builder.h b/src/wasm/asm-wasm-builder.h
index 09645ee..b99c3ef 100644
--- a/src/wasm/asm-wasm-builder.h
+++ b/src/wasm/asm-wasm-builder.h
@@ -21,14 +21,13 @@
 class AsmWasmBuilder {
  public:
   explicit AsmWasmBuilder(Isolate* isolate, Zone* zone, FunctionLiteral* root,
-                          Handle<Object> foreign, AsmTyper* typer);
-  WasmModuleIndex* Run();
+                          AsmTyper* typer);
+  ZoneBuffer* Run(Handle<FixedArray>* foreign_args);
 
  private:
   Isolate* isolate_;
   Zone* zone_;
   FunctionLiteral* literal_;
-  Handle<Object> foreign_;
   AsmTyper* typer_;
 };
 }  // namespace wasm
diff --git a/src/wasm/ast-decoder.cc b/src/wasm/ast-decoder.cc
index b8a86c3..ef83c67 100644
--- a/src/wasm/ast-decoder.cc
+++ b/src/wasm/ast-decoder.cc
@@ -157,10 +157,17 @@
     return false;
   }
 
-  inline bool Validate(const byte* pc, CallFunctionOperand& operand) {
+  inline bool Complete(const byte* pc, CallFunctionOperand& operand) {
     ModuleEnv* m = module_;
     if (m && m->module && operand.index < m->module->functions.size()) {
       operand.sig = m->module->functions[operand.index].sig;
+      return true;
+    }
+    return false;
+  }
+
+  inline bool Validate(const byte* pc, CallFunctionOperand& operand) {
+    if (Complete(pc, operand)) {
       uint32_t expected = static_cast<uint32_t>(operand.sig->parameter_count());
       if (operand.arity != expected) {
         error(pc, pc + 1,
@@ -174,10 +181,17 @@
     return false;
   }
 
-  inline bool Validate(const byte* pc, CallIndirectOperand& operand) {
+  inline bool Complete(const byte* pc, CallIndirectOperand& operand) {
     ModuleEnv* m = module_;
     if (m && m->module && operand.index < m->module->signatures.size()) {
       operand.sig = m->module->signatures[operand.index];
+      return true;
+    }
+    return false;
+  }
+
+  inline bool Validate(const byte* pc, CallIndirectOperand& operand) {
+    if (Complete(pc, operand)) {
       uint32_t expected = static_cast<uint32_t>(operand.sig->parameter_count());
       if (operand.arity != expected) {
         error(pc, pc + 1,
@@ -191,10 +205,17 @@
     return false;
   }
 
-  inline bool Validate(const byte* pc, CallImportOperand& operand) {
+  inline bool Complete(const byte* pc, CallImportOperand& operand) {
     ModuleEnv* m = module_;
     if (m && m->module && operand.index < m->module->import_table.size()) {
       operand.sig = m->module->import_table[operand.index].sig;
+      return true;
+    }
+    return false;
+  }
+
+  inline bool Validate(const byte* pc, CallImportOperand& operand) {
+    if (Complete(pc, operand)) {
       uint32_t expected = static_cast<uint32_t>(operand.sig->parameter_count());
       if (operand.arity != expected) {
         error(pc, pc + 1, "arity mismatch in import call (expected %u, got %u)",
@@ -228,7 +249,7 @@
       return false;
     }
     // Verify table.
-    for (uint32_t i = 0; i < operand.table_count + 1; i++) {
+    for (uint32_t i = 0; i < operand.table_count + 1; ++i) {
       uint32_t target = operand.read_entry(this, i);
       if (target >= block_depth) {
         error(operand.table + i * 2, "improper branch in br_table");
@@ -238,7 +259,7 @@
     return true;
   }
 
-  int OpcodeArity(const byte* pc) {
+  unsigned OpcodeArity(const byte* pc) {
 #define DECLARE_ARITY(name, ...)                          \
   static const LocalType kTypes_##name[] = {__VA_ARGS__}; \
   static const int kArity_##name =                        \
@@ -311,6 +332,7 @@
         FOREACH_MISC_MEM_OPCODE(DECLARE_OPCODE_CASE)
         FOREACH_SIMPLE_OPCODE(DECLARE_OPCODE_CASE)
         FOREACH_ASMJS_COMPAT_OPCODE(DECLARE_OPCODE_CASE)
+        FOREACH_SIMD_OPCODE(DECLARE_OPCODE_CASE)
 #undef DECLARE_OPCODE_CASE
       default:
         UNREACHABLE();
@@ -318,7 +340,7 @@
     }
   }
 
-  int OpcodeLength(const byte* pc) {
+  unsigned OpcodeLength(const byte* pc) {
     switch (static_cast<WasmOpcode>(*pc)) {
 #define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
       FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
@@ -390,7 +412,7 @@
 // shift-reduce strategy with multiple internal stacks.
 class SR_WasmDecoder : public WasmDecoder {
  public:
-  SR_WasmDecoder(Zone* zone, TFBuilder* builder, FunctionBody& body)
+  SR_WasmDecoder(Zone* zone, TFBuilder* builder, const FunctionBody& body)
       : WasmDecoder(body.module, body.sig, body.start, body.end),
         zone_(zone),
         builder_(builder),
@@ -543,7 +565,7 @@
   char* indentation() {
     static const int kMaxIndent = 64;
     static char bytes[kMaxIndent + 1];
-    for (int i = 0; i < kMaxIndent; i++) bytes[i] = ' ';
+    for (int i = 0; i < kMaxIndent; ++i) bytes[i] = ' ';
     bytes[kMaxIndent] = 0;
     if (stack_.size() < kMaxIndent / 2) {
       bytes[stack_.size() * 2] = 0;
@@ -557,15 +579,14 @@
     // Initialize {local_type_vec} from signature.
     if (sig_) {
       local_type_vec_.reserve(sig_->parameter_count());
-      for (size_t i = 0; i < sig_->parameter_count(); i++) {
+      for (size_t i = 0; i < sig_->parameter_count(); ++i) {
         local_type_vec_.push_back(sig_->GetParam(i));
       }
     }
     // Decode local declarations, if any.
-    int length;
-    uint32_t entries = consume_u32v(&length, "local decls count");
+    uint32_t entries = consume_u32v("local decls count");
     while (entries-- > 0 && pc_ < limit_) {
-      uint32_t count = consume_u32v(&length, "local count");
+      uint32_t count = consume_u32v("local count");
       byte code = consume_u8("local type");
       LocalType type;
       switch (code) {
@@ -600,7 +621,7 @@
     if (pc_ >= limit_) return;  // Nothing to do.
 
     while (true) {  // decoding loop.
-      int len = 1;
+      unsigned len = 1;
       WasmOpcode opcode = static_cast<WasmOpcode>(*pc_);
       TRACE("  @%-6d #%02x:%-20s|", startrel(pc_), opcode,
             WasmOpcodes::ShortOpcodeName(opcode));
@@ -792,7 +813,7 @@
 
                 SsaEnv* copy = Steal(break_env);
                 ssa_env_ = copy;
-                for (uint32_t i = 0; i < operand.table_count + 1; i++) {
+                for (uint32_t i = 0; i < operand.table_count + 1; ++i) {
                   uint16_t target = operand.read_entry(this, i);
                   ssa_env_ = Split(copy);
                   ssa_env_->control = (i == operand.table_count)
@@ -1023,7 +1044,7 @@
 
 #if DEBUG
       if (FLAG_trace_wasm_decoder) {
-        for (size_t i = 0; i < stack_.size(); i++) {
+        for (size_t i = 0; i < stack_.size(); ++i) {
           Value& val = stack_[i];
           WasmOpcode opcode = static_cast<WasmOpcode>(*val.pc);
           PrintF(" %c@%d:%s", WasmOpcodes::ShortNameOf(val.type),
@@ -1104,8 +1125,8 @@
   int DecodeLoadMem(LocalType type, MachineType mem_type) {
     MemoryAccessOperand operand(this, pc_);
     Value index = Pop(0, kAstI32);
-    TFNode* node =
-        BUILD(LoadMem, type, mem_type, index.node, operand.offset, position());
+    TFNode* node = BUILD(LoadMem, type, mem_type, index.node, operand.offset,
+                         operand.alignment, position());
     Push(type, node);
     return 1 + operand.length;
   }
@@ -1114,7 +1135,8 @@
     MemoryAccessOperand operand(this, pc_);
     Value val = Pop(1, type);
     Value index = Pop(0, kAstI32);
-    BUILD(StoreMem, mem_type, index.node, operand.offset, val.node, position());
+    BUILD(StoreMem, mem_type, index.node, operand.offset, operand.alignment,
+          val.node, position());
     Push(type, val.node);
     return 1 + operand.length;
   }
@@ -1433,9 +1455,9 @@
         new (zone_) BitVector(static_cast<int>(local_type_vec_.size()), zone_);
     int depth = 0;
     // Iteratively process all AST nodes nested inside the loop.
-    while (pc < limit_) {
+    while (pc < limit_ && ok()) {
       WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
-      int length = 1;
+      unsigned length = 1;
       switch (opcode) {
         case kExprLoop:
         case kExprIf:
@@ -1463,7 +1485,7 @@
       if (depth <= 0) break;
       pc += length;
     }
-    return assigned;
+    return ok() ? assigned : nullptr;
   }
 
   inline wasm::WasmCodePosition position() {
@@ -1505,7 +1527,7 @@
   }
   PrintF("%s", WasmOpcodes::OpcodeName(tree.opcode()));
   if (tree.count > 0) os << "(";
-  for (uint32_t i = 0; i < tree.count; i++) {
+  for (uint32_t i = 0; i < tree.count; ++i) {
     if (i > 0) os << ", ";
     os << *tree.children[i];
   }
@@ -1513,31 +1535,33 @@
   return os;
 }
 
-int OpcodeLength(const byte* pc, const byte* end) {
+unsigned OpcodeLength(const byte* pc, const byte* end) {
   WasmDecoder decoder(nullptr, nullptr, pc, end);
   return decoder.OpcodeLength(pc);
 }
 
-int OpcodeArity(const byte* pc, const byte* end) {
+unsigned OpcodeArity(const byte* pc, const byte* end) {
   WasmDecoder decoder(nullptr, nullptr, pc, end);
   return decoder.OpcodeArity(pc);
 }
 
 void PrintAstForDebugging(const byte* start, const byte* end) {
-  FunctionBody body = {nullptr, nullptr, start, start, end};
   base::AccountingAllocator allocator;
-  PrintAst(&allocator, body);
+  OFStream os(stdout);
+  PrintAst(&allocator, FunctionBodyForTesting(start, end), os, nullptr);
 }
 
-void PrintAst(base::AccountingAllocator* allocator, FunctionBody& body) {
+bool PrintAst(base::AccountingAllocator* allocator, const FunctionBody& body,
+              std::ostream& os,
+              std::vector<std::tuple<uint32_t, int, int>>* offset_table) {
   Zone zone(allocator);
   SR_WasmDecoder decoder(&zone, nullptr, body);
-
-  OFStream os(stdout);
+  int line_nr = 0;
 
   // Print the function signature.
   if (body.sig) {
     os << "// signature: " << *body.sig << std::endl;
+    ++line_nr;
   }
 
   // Print the local declarations.
@@ -1554,24 +1578,35 @@
     os << std::endl;
 
     for (const byte* locals = body.start; locals < pc; locals++) {
-      printf(" 0x%02x,", *locals);
+      os << (locals == body.start ? "0x" : " 0x") << AsHex(*locals, 2) << ",";
     }
     os << std::endl;
+    ++line_nr;
   }
 
-  os << "// body: \n";
-  int control_depth = 0;
+  os << "// body: " << std::endl;
+  ++line_nr;
+  unsigned control_depth = 0;
   while (pc < body.end) {
-    size_t length = decoder.OpcodeLength(pc);
+    unsigned length = decoder.OpcodeLength(pc);
 
     WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
     if (opcode == kExprElse) control_depth--;
 
-    for (int i = 0; i < control_depth && i < 32; i++) printf("  ");
-    printf("k%s,", WasmOpcodes::OpcodeName(opcode));
+    int num_whitespaces = control_depth < 32 ? 2 * control_depth : 64;
+    if (offset_table) {
+      offset_table->push_back(
+          std::make_tuple(pc - body.start, line_nr, num_whitespaces));
+    }
 
-    for (size_t i = 1; i < length; i++) {
-      printf(" 0x%02x,", pc[i]);
+    // 64 whitespaces
+    const char* padding =
+        "                                                                ";
+    os.write(padding, num_whitespaces);
+    os << "k" << WasmOpcodes::OpcodeName(opcode) << ",";
+
+    for (size_t i = 1; i < length; ++i) {
+      os << " " << AsHex(pc[i], 2) << ",";
     }
 
     switch (opcode) {
@@ -1604,7 +1639,7 @@
       }
       case kExprCallIndirect: {
         CallIndirectOperand operand(&decoder, pc);
-        if (decoder.Validate(pc, operand)) {
+        if (decoder.Complete(pc, operand)) {
           os << "   // sig #" << operand.index << ": " << *operand.sig;
         } else {
           os << " // arity=" << operand.arity << " sig #" << operand.index;
@@ -1613,7 +1648,7 @@
       }
       case kExprCallImport: {
         CallImportOperand operand(&decoder, pc);
-        if (decoder.Validate(pc, operand)) {
+        if (decoder.Complete(pc, operand)) {
           os << "   // import #" << operand.index << ": " << *operand.sig;
         } else {
           os << " // arity=" << operand.arity << " import #" << operand.index;
@@ -1622,7 +1657,7 @@
       }
       case kExprCallFunction: {
         CallFunctionOperand operand(&decoder, pc);
-        if (decoder.Validate(pc, operand)) {
+        if (decoder.Complete(pc, operand)) {
           os << "   // function #" << operand.index << ": " << *operand.sig;
         } else {
           os << " // arity=" << operand.arity << " function #" << operand.index;
@@ -1640,7 +1675,10 @@
 
     pc += length;
     os << std::endl;
+    ++line_nr;
   }
+
+  return decoder.ok();
 }
 
 BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
diff --git a/src/wasm/ast-decoder.h b/src/wasm/ast-decoder.h
index 9e96053..27170dd 100644
--- a/src/wasm/ast-decoder.h
+++ b/src/wasm/ast-decoder.h
@@ -25,7 +25,7 @@
 struct LocalIndexOperand {
   uint32_t index;
   LocalType type;
-  int length;
+  unsigned length;
 
   inline LocalIndexOperand(Decoder* decoder, const byte* pc) {
     index = decoder->checked_read_u32v(pc, 1, &length, "local index");
@@ -35,7 +35,7 @@
 
 struct ImmI8Operand {
   int8_t value;
-  int length;
+  unsigned length;
   inline ImmI8Operand(Decoder* decoder, const byte* pc) {
     value = bit_cast<int8_t>(decoder->checked_read_u8(pc, 1, "immi8"));
     length = 1;
@@ -44,7 +44,7 @@
 
 struct ImmI32Operand {
   int32_t value;
-  int length;
+  unsigned length;
   inline ImmI32Operand(Decoder* decoder, const byte* pc) {
     value = decoder->checked_read_i32v(pc, 1, &length, "immi32");
   }
@@ -52,7 +52,7 @@
 
 struct ImmI64Operand {
   int64_t value;
-  int length;
+  unsigned length;
   inline ImmI64Operand(Decoder* decoder, const byte* pc) {
     value = decoder->checked_read_i64v(pc, 1, &length, "immi64");
   }
@@ -60,7 +60,7 @@
 
 struct ImmF32Operand {
   float value;
-  int length;
+  unsigned length;
   inline ImmF32Operand(Decoder* decoder, const byte* pc) {
     value = bit_cast<float>(decoder->checked_read_u32(pc, 1, "immf32"));
     length = 4;
@@ -69,7 +69,7 @@
 
 struct ImmF64Operand {
   double value;
-  int length;
+  unsigned length;
   inline ImmF64Operand(Decoder* decoder, const byte* pc) {
     value = bit_cast<double>(decoder->checked_read_u64(pc, 1, "immf64"));
     length = 8;
@@ -80,7 +80,7 @@
   uint32_t index;
   LocalType type;
   MachineType machine_type;
-  int length;
+  unsigned length;
 
   inline GlobalIndexOperand(Decoder* decoder, const byte* pc) {
     index = decoder->checked_read_u32v(pc, 1, &length, "global index");
@@ -94,10 +94,10 @@
   uint32_t arity;
   uint32_t depth;
   Control* target;
-  int length;
+  unsigned length;
   inline BreakDepthOperand(Decoder* decoder, const byte* pc) {
-    int len1 = 0;
-    int len2 = 0;
+    unsigned len1 = 0;
+    unsigned len2 = 0;
     arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
     depth = decoder->checked_read_u32v(pc, 1 + len1, &len2, "break depth");
     length = len1 + len2;
@@ -109,10 +109,10 @@
   uint32_t arity;
   uint32_t index;
   FunctionSig* sig;
-  int length;
+  unsigned length;
   inline CallIndirectOperand(Decoder* decoder, const byte* pc) {
-    int len1 = 0;
-    int len2 = 0;
+    unsigned len1 = 0;
+    unsigned len2 = 0;
     arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
     index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "signature index");
     length = len1 + len2;
@@ -124,10 +124,10 @@
   uint32_t arity;
   uint32_t index;
   FunctionSig* sig;
-  int length;
+  unsigned length;
   inline CallFunctionOperand(Decoder* decoder, const byte* pc) {
-    int len1 = 0;
-    int len2 = 0;
+    unsigned len1 = 0;
+    unsigned len2 = 0;
     arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
     index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "function index");
     length = len1 + len2;
@@ -139,10 +139,10 @@
   uint32_t arity;
   uint32_t index;
   FunctionSig* sig;
-  int length;
+  unsigned length;
   inline CallImportOperand(Decoder* decoder, const byte* pc) {
-    int len1 = 0;
-    int len2 = 0;
+    unsigned len1 = 0;
+    unsigned len2 = 0;
     arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
     index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "import index");
     length = len1 + len2;
@@ -154,13 +154,17 @@
   uint32_t arity;
   uint32_t table_count;
   const byte* table;
-  int length;
+  unsigned length;
   inline BranchTableOperand(Decoder* decoder, const byte* pc) {
-    int len1 = 0;
-    int len2 = 0;
+    unsigned len1 = 0;
+    unsigned len2 = 0;
     arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
     table_count =
         decoder->checked_read_u32v(pc, 1 + len1, &len2, "table count");
+    if (table_count > (UINT_MAX / sizeof(uint32_t)) - 1 ||
+        len1 + len2 > UINT_MAX - (table_count + 1) * sizeof(uint32_t)) {
+      decoder->error(pc, "branch table size overflow");
+    }
     length = len1 + len2 + (table_count + 1) * sizeof(uint32_t);
 
     uint32_t table_start = 1 + len1 + len2;
@@ -171,8 +175,8 @@
       table = nullptr;
     }
   }
-  inline uint32_t read_entry(Decoder* decoder, int i) {
-    DCHECK(i >= 0 && static_cast<uint32_t>(i) <= table_count);
+  inline uint32_t read_entry(Decoder* decoder, unsigned i) {
+    DCHECK(i <= table_count);
     return table ? decoder->read_u32(table + i * sizeof(uint32_t)) : 0;
   }
 };
@@ -180,12 +184,12 @@
 struct MemoryAccessOperand {
   uint32_t alignment;
   uint32_t offset;
-  int length;
+  unsigned length;
   inline MemoryAccessOperand(Decoder* decoder, const byte* pc) {
-    int alignment_length;
+    unsigned alignment_length;
     alignment =
         decoder->checked_read_u32v(pc, 1, &alignment_length, "alignment");
-    int offset_length;
+    unsigned offset_length;
     offset = decoder->checked_read_u32v(pc, 1 + alignment_length,
                                         &offset_length, "offset");
     length = alignment_length + offset_length;
@@ -194,7 +198,7 @@
 
 struct ReturnArityOperand {
   uint32_t arity;
-  int length;
+  unsigned length;
 
   inline ReturnArityOperand(Decoder* decoder, const byte* pc) {
     arity = decoder->checked_read_u32v(pc, 1, &length, "return count");
@@ -213,6 +217,11 @@
   const byte* end;    // end of the function body
 };
 
+static inline FunctionBody FunctionBodyForTesting(const byte* start,
+                                                  const byte* end) {
+  return {nullptr, nullptr, start, start, end};
+}
+
 struct Tree;
 typedef Result<Tree*> TreeResult;
 
@@ -222,7 +231,9 @@
                           FunctionBody& body);
 TreeResult BuildTFGraph(base::AccountingAllocator* allocator,
                         TFBuilder* builder, FunctionBody& body);
-void PrintAst(base::AccountingAllocator* allocator, FunctionBody& body);
+bool PrintAst(base::AccountingAllocator* allocator, const FunctionBody& body,
+              std::ostream& os,
+              std::vector<std::tuple<uint32_t, int, int>>* offset_table);
 
 // A simplified form of AST printing, e.g. from a debugger.
 void PrintAstForDebugging(const byte* start, const byte* end);
@@ -262,10 +273,10 @@
                                            const byte* start, const byte* end);
 
 // Computes the length of the opcode at the given address.
-int OpcodeLength(const byte* pc, const byte* end);
+unsigned OpcodeLength(const byte* pc, const byte* end);
 
 // Computes the arity (number of sub-nodes) of the opcode at the given address.
-int OpcodeArity(const byte* pc, const byte* end);
+unsigned OpcodeArity(const byte* pc, const byte* end);
 
 }  // namespace wasm
 }  // namespace internal
diff --git a/src/wasm/decoder.h b/src/wasm/decoder.h
index 685f5d0..c1090a8 100644
--- a/src/wasm/decoder.h
+++ b/src/wasm/decoder.h
@@ -46,7 +46,8 @@
 
   virtual ~Decoder() {}
 
-  inline bool check(const byte* base, int offset, int length, const char* msg) {
+  inline bool check(const byte* base, unsigned offset, unsigned length,
+                    const char* msg) {
     DCHECK_GE(base, start_);
     if ((base + offset + length) > limit_) {
       error(base, base + offset, "%s", msg);
@@ -56,37 +57,38 @@
   }
 
   // Reads a single 8-bit byte, reporting an error if out of bounds.
-  inline uint8_t checked_read_u8(const byte* base, int offset,
+  inline uint8_t checked_read_u8(const byte* base, unsigned offset,
                                  const char* msg = "expected 1 byte") {
     return check(base, offset, 1, msg) ? base[offset] : 0;
   }
 
   // Reads 16-bit word, reporting an error if out of bounds.
-  inline uint16_t checked_read_u16(const byte* base, int offset,
+  inline uint16_t checked_read_u16(const byte* base, unsigned offset,
                                    const char* msg = "expected 2 bytes") {
     return check(base, offset, 2, msg) ? read_u16(base + offset) : 0;
   }
 
   // Reads 32-bit word, reporting an error if out of bounds.
-  inline uint32_t checked_read_u32(const byte* base, int offset,
+  inline uint32_t checked_read_u32(const byte* base, unsigned offset,
                                    const char* msg = "expected 4 bytes") {
     return check(base, offset, 4, msg) ? read_u32(base + offset) : 0;
   }
 
   // Reads 64-bit word, reporting an error if out of bounds.
-  inline uint64_t checked_read_u64(const byte* base, int offset,
+  inline uint64_t checked_read_u64(const byte* base, unsigned offset,
                                    const char* msg = "expected 8 bytes") {
     return check(base, offset, 8, msg) ? read_u64(base + offset) : 0;
   }
 
   // Reads a variable-length unsigned integer (little endian).
-  uint32_t checked_read_u32v(const byte* base, int offset, int* length,
+  uint32_t checked_read_u32v(const byte* base, unsigned offset,
+                             unsigned* length,
                              const char* msg = "expected LEB32") {
     return checked_read_leb<uint32_t, false>(base, offset, length, msg);
   }
 
   // Reads a variable-length signed integer (little endian).
-  int32_t checked_read_i32v(const byte* base, int offset, int* length,
+  int32_t checked_read_i32v(const byte* base, unsigned offset, unsigned* length,
                             const char* msg = "expected SLEB32") {
     uint32_t result =
         checked_read_leb<uint32_t, true>(base, offset, length, msg);
@@ -100,13 +102,14 @@
   }
 
   // Reads a variable-length unsigned integer (little endian).
-  uint64_t checked_read_u64v(const byte* base, int offset, int* length,
+  uint64_t checked_read_u64v(const byte* base, unsigned offset,
+                             unsigned* length,
                              const char* msg = "expected LEB64") {
     return checked_read_leb<uint64_t, false>(base, offset, length, msg);
   }
 
   // Reads a variable-length signed integer (little endian).
-  int64_t checked_read_i64v(const byte* base, int offset, int* length,
+  int64_t checked_read_i64v(const byte* base, unsigned offset, unsigned* length,
                             const char* msg = "expected SLEB64") {
     uint64_t result =
         checked_read_leb<uint64_t, true>(base, offset, length, msg);
@@ -204,10 +207,9 @@
   }
 
   // Reads a LEB128 variable-length 32-bit integer and advances {pc_}.
-  uint32_t consume_u32v(int* length, const char* name = nullptr) {
+  uint32_t consume_u32v(const char* name = nullptr) {
     TRACE("  +%d  %-20s: ", static_cast<int>(pc_ - start_),
           name ? name : "varint");
-
     if (checkAvailable(1)) {
       const byte* pos = pc_;
       const byte* end = pc_ + 5;
@@ -224,10 +226,10 @@
         shift += 7;
       }
 
-      *length = static_cast<int>(pc_ - pos);
+      int length = static_cast<int>(pc_ - pos);
       if (pc_ == end && (b & 0x80)) {
         error(pc_ - 1, "varint too large");
-      } else if (*length == 0) {
+      } else if (length == 0) {
         error(pc_, "varint of length 0");
       } else {
         TRACE("= %u\n", result);
@@ -316,7 +318,7 @@
     } else {
       result.error_code = kSuccess;
     }
-    result.val = val;
+    result.val = std::move(val);
     return result;
   }
 
@@ -350,7 +352,7 @@
 
  private:
   template <typename IntType, bool is_signed>
-  IntType checked_read_leb(const byte* base, int offset, int* length,
+  IntType checked_read_leb(const byte* base, unsigned offset, unsigned* length,
                            const char* msg) {
     if (!check(base, offset, 1, msg)) {
       *length = 0;
@@ -371,7 +373,7 @@
       shift += 7;
     }
     DCHECK_LE(ptr - (base + offset), kMaxLength);
-    *length = static_cast<int>(ptr - (base + offset));
+    *length = static_cast<unsigned>(ptr - (base + offset));
     if (ptr == end) {
       // Check there are no bits set beyond the bitwidth of {IntType}.
       const int kExtraBits = (1 + kMaxLength * 7) - (sizeof(IntType) * 8);
diff --git a/src/wasm/encoder.cc b/src/wasm/encoder.cc
index 39a2f5a..ffd0294 100644
--- a/src/wasm/encoder.cc
+++ b/src/wasm/encoder.cc
@@ -30,73 +30,33 @@
 namespace internal {
 namespace wasm {
 
-/*TODO: add error cases for adding too many locals, too many functions and bad
-  indices in body */
-
-namespace {
-void EmitUint8(byte** b, uint8_t x) {
-  Memory::uint8_at(*b) = x;
-  *b += 1;
-}
-
-void EmitUint16(byte** b, uint16_t x) {
-  WriteUnalignedUInt16(*b, x);
-  *b += 2;
-}
-
-void EmitUint32(byte** b, uint32_t x) {
-  WriteUnalignedUInt32(*b, x);
-  *b += 4;
-}
-
-void EmitVarInt(byte** b, size_t val) {
-  LEBHelper::write_u32v(b, static_cast<uint32_t>(val));
-}
-
-// Sections all start with a size, but it's unknown at the start.
-// We generate a large varint which we then fixup later when the size is known.
-//
-// TODO(jfb) Not strictly necessary since sizes are calculated ahead of time.
-const size_t kPaddedVarintSize = 5;
-
-void FixupSection(byte* start, byte* end) {
-  // Same as LEBHelper::write_u32v, but fixed-width with zeroes in the MSBs.
-  size_t val = end - start - kPaddedVarintSize;
-  TRACE("  fixup %u\n", (unsigned)val);
-  for (size_t pos = 0; pos != kPaddedVarintSize; ++pos) {
-    size_t next = val >> 7;
-    byte out = static_cast<byte>(val & 0x7f);
-    if (pos != kPaddedVarintSize - 1) {
-      *(start++) = 0x80 | out;
-      val = next;
-    } else {
-      *(start++) = out;
-      // TODO(jfb) check that the pre-allocated fixup size isn't overflowed.
-    }
-  }
-}
-
-// Returns the start of the section, where the section VarInt size is.
-byte* EmitSection(WasmSection::Code code, byte** b) {
+// Emit a section name and the size as a padded varint that can be patched
+// later.
+size_t EmitSection(WasmSection::Code code, ZoneBuffer& buffer) {
   // Emit the section name.
   const char* name = WasmSection::getName(code);
   TRACE("emit section: %s\n", name);
   size_t length = WasmSection::getNameLength(code);
-  EmitVarInt(b, length);  // Section name string size.
-  for (size_t i = 0; i != length; ++i) EmitUint8(b, name[i]);
+  buffer.write_size(length);  // Section name string size.
+  buffer.write(reinterpret_cast<const byte*>(name), length);
 
   // Emit a placeholder for the length.
-  byte* start = *b;
-  for (size_t padding = 0; padding != kPaddedVarintSize; ++padding) {
-    EmitUint8(b, 0xff);  // Will get fixed up later.
-  }
-
-  return start;
+  return buffer.reserve_u32v();
 }
-}  // namespace
 
-WasmFunctionBuilder::WasmFunctionBuilder(Zone* zone)
-    : locals_(zone), exported_(0), body_(zone), name_(zone) {}
+// Patch the size of a section after it's finished.
+void FixupSection(ZoneBuffer& buffer, size_t start) {
+  buffer.patch_u32v(start, static_cast<uint32_t>(buffer.offset() - start -
+                                                 kPaddedVarInt32Size));
+}
+
+WasmFunctionBuilder::WasmFunctionBuilder(WasmModuleBuilder* builder)
+    : builder_(builder),
+      locals_(builder->zone()),
+      signature_index_(0),
+      exported_(0),
+      body_(builder->zone()),
+      name_(builder->zone()) {}
 
 void WasmFunctionBuilder::EmitVarInt(uint32_t val) {
   byte buffer[8];
@@ -110,6 +70,7 @@
 void WasmFunctionBuilder::SetSignature(FunctionSig* sig) {
   DCHECK(!locals_.has_sig());
   locals_.set_sig(sig);
+  signature_index_ = builder_->AddSignature(sig);
 }
 
 uint32_t WasmFunctionBuilder::AddLocal(LocalType type) {
@@ -126,7 +87,7 @@
 }
 
 void WasmFunctionBuilder::EmitCode(const byte* code, uint32_t code_size) {
-  for (size_t i = 0; i < code_size; i++) {
+  for (size_t i = 0; i < code_size; ++i) {
     body_.push_back(code[i]);
   }
 }
@@ -163,98 +124,56 @@
   }
 }
 
-void WasmFunctionBuilder::Exported(uint8_t flag) { exported_ = flag; }
+void WasmFunctionBuilder::SetExported() { exported_ = true; }
 
 void WasmFunctionBuilder::SetName(const char* name, int name_length) {
   name_.clear();
   if (name_length > 0) {
-    for (int i = 0; i < name_length; i++) {
+    for (int i = 0; i < name_length; ++i) {
       name_.push_back(*(name + i));
     }
   }
 }
 
-WasmFunctionEncoder* WasmFunctionBuilder::Build(Zone* zone,
-                                                WasmModuleBuilder* mb) const {
-  WasmFunctionEncoder* e =
-      new (zone) WasmFunctionEncoder(zone, locals_, exported_);
-  // TODO(titzer): lame memcpy here.
-  e->body_.insert(e->body_.begin(), body_.begin(), body_.end());
-  e->signature_index_ = mb->AddSignature(locals_.get_sig());
-  e->name_.insert(e->name_.begin(), name_.begin(), name_.end());
-  return e;
+void WasmFunctionBuilder::WriteSignature(ZoneBuffer& buffer) const {
+  buffer.write_u32v(signature_index_);
 }
 
-WasmFunctionEncoder::WasmFunctionEncoder(Zone* zone, LocalDeclEncoder locals,
-                                         bool exported)
-    : locals_(locals), exported_(exported), body_(zone), name_(zone) {}
-
-uint32_t WasmFunctionEncoder::HeaderSize() const {
-  uint32_t size = 3;
-  size += 2;
-  if (HasName()) {
-    uint32_t name_size = NameSize();
-    size +=
-        static_cast<uint32_t>(LEBHelper::sizeof_u32v(name_size)) + name_size;
-  }
-  return size;
-}
-
-uint32_t WasmFunctionEncoder::BodySize(void) const {
-  return static_cast<uint32_t>(body_.size() + locals_.Size());
-}
-
-uint32_t WasmFunctionEncoder::NameSize() const {
-  return HasName() ? static_cast<uint32_t>(name_.size()) : 0;
-}
-
-void WasmFunctionEncoder::Serialize(byte* buffer, byte** header,
-                                    byte** body) const {
-  uint8_t decl_bits = (exported_ ? kDeclFunctionExport : 0) |
-                      (HasName() ? kDeclFunctionName : 0);
-
-  EmitUint8(header, decl_bits);
-  EmitUint16(header, signature_index_);
-
-  if (HasName()) {
-    EmitVarInt(header, NameSize());
-    for (size_t i = 0; i < name_.size(); ++i) {
-      EmitUint8(header, name_[i]);
+void WasmFunctionBuilder::WriteExport(ZoneBuffer& buffer,
+                                      uint32_t func_index) const {
+  if (exported_) {
+    buffer.write_u32v(func_index);
+    buffer.write_size(name_.size());
+    if (name_.size() > 0) {
+      buffer.write(reinterpret_cast<const byte*>(&name_[0]), name_.size());
     }
   }
+}
 
-  EmitUint16(header, static_cast<uint16_t>(body_.size() + locals_.Size()));
-  (*header) += locals_.Emit(*header);
+void WasmFunctionBuilder::WriteBody(ZoneBuffer& buffer) const {
+  size_t locals_size = locals_.Size();
+  buffer.write_size(locals_size + body_.size());
+  buffer.EnsureSpace(locals_size);
+  byte** ptr = buffer.pos_ptr();
+  locals_.Emit(*ptr);
+  (*ptr) += locals_size;  // UGLY: manual bump of position pointer
   if (body_.size() > 0) {
-    std::memcpy(*header, &body_[0], body_.size());
-    (*header) += body_.size();
+    buffer.write(&body_[0], body_.size());
   }
 }
 
 WasmDataSegmentEncoder::WasmDataSegmentEncoder(Zone* zone, const byte* data,
                                                uint32_t size, uint32_t dest)
     : data_(zone), dest_(dest) {
-  for (size_t i = 0; i < size; i++) {
+  for (size_t i = 0; i < size; ++i) {
     data_.push_back(data[i]);
   }
 }
 
-uint32_t WasmDataSegmentEncoder::HeaderSize() const {
-  static const int kDataSegmentSize = 13;
-  return kDataSegmentSize;
-}
-
-uint32_t WasmDataSegmentEncoder::BodySize() const {
-  return static_cast<uint32_t>(data_.size());
-}
-
-void WasmDataSegmentEncoder::Serialize(byte* buffer, byte** header,
-                                       byte** body) const {
-  EmitVarInt(header, dest_);
-  EmitVarInt(header, static_cast<uint32_t>(data_.size()));
-
-  std::memcpy(*header, &data_[0], data_.size());
-  (*header) += data_.size();
+void WasmDataSegmentEncoder::Write(ZoneBuffer& buffer) const {
+  buffer.write_u32v(dest_);
+  buffer.write_u32v(static_cast<uint32_t>(data_.size()));
+  buffer.write(&data_[0], data_.size());
 }
 
 WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
@@ -269,7 +188,7 @@
       start_function_index_(-1) {}
 
 uint32_t WasmModuleBuilder::AddFunction() {
-  functions_.push_back(new (zone_) WasmFunctionBuilder(zone_));
+  functions_.push_back(new (zone_) WasmFunctionBuilder(this));
   return static_cast<uint32_t>(functions_.size() - 1);
 }
 
@@ -328,260 +247,135 @@
   start_function_index_ = index;
 }
 
-WasmModuleWriter* WasmModuleBuilder::Build(Zone* zone) {
-  WasmModuleWriter* writer = new (zone) WasmModuleWriter(zone);
-  for (auto import : imports_) {
-    writer->imports_.push_back(import);
-  }
-  for (auto function : functions_) {
-    writer->functions_.push_back(function->Build(zone, this));
-  }
-  for (auto segment : data_segments_) {
-    writer->data_segments_.push_back(segment);
-  }
-  for (auto sig : signatures_) {
-    writer->signatures_.push_back(sig);
-  }
-  for (auto index : indirect_functions_) {
-    writer->indirect_functions_.push_back(index);
-  }
-  for (auto global : globals_) {
-    writer->globals_.push_back(global);
-  }
-  writer->start_function_index_ = start_function_index_;
-  return writer;
-}
-
 uint32_t WasmModuleBuilder::AddGlobal(MachineType type, bool exported) {
   globals_.push_back(std::make_pair(type, exported));
   return static_cast<uint32_t>(globals_.size() - 1);
 }
 
-WasmModuleWriter::WasmModuleWriter(Zone* zone)
-    : imports_(zone),
-      functions_(zone),
-      data_segments_(zone),
-      signatures_(zone),
-      indirect_functions_(zone),
-      globals_(zone) {}
+void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
+  uint32_t exports = 0;
 
-struct Sizes {
-  size_t header_size;
-  size_t body_size;
-
-  size_t total() { return header_size + body_size; }
-
-  void Add(size_t header, size_t body) {
-    header_size += header;
-    body_size += body;
-  }
-
-  void AddSection(WasmSection::Code code, size_t other_size) {
-    Add(kPaddedVarintSize +
-            LEBHelper::sizeof_u32v(WasmSection::getNameLength(code)) +
-            WasmSection::getNameLength(code),
-        0);
-    if (other_size) Add(LEBHelper::sizeof_u32v(other_size), 0);
-  }
-};
-
-WasmModuleIndex* WasmModuleWriter::WriteTo(Zone* zone) const {
-  Sizes sizes = {0, 0};
-
-  sizes.Add(2 * sizeof(uint32_t), 0);  // header
-
-  if (globals_.size() > 0) {
-    sizes.AddSection(WasmSection::Code::Globals, globals_.size());
-    /* These globals never have names, so are always 3 bytes. */
-    sizes.Add(3 * globals_.size(), 0);
-    TRACE("Size after globals: %u, %u\n", (unsigned)sizes.header_size,
-          (unsigned)sizes.body_size);
-  }
-
-  if (signatures_.size() > 0) {
-    sizes.AddSection(WasmSection::Code::Signatures, signatures_.size());
-    for (auto sig : signatures_) {
-      sizes.Add(1 + LEBHelper::sizeof_u32v(sig->parameter_count()) +
-                    sig->parameter_count() +
-                    LEBHelper::sizeof_u32v(sig->return_count()) +
-                    sig->return_count(),
-                0);
-    }
-    TRACE("Size after signatures: %u, %u\n", (unsigned)sizes.header_size,
-          (unsigned)sizes.body_size);
-  }
-
-  if (functions_.size() > 0) {
-    sizes.AddSection(WasmSection::Code::OldFunctions, functions_.size());
-    for (auto function : functions_) {
-      sizes.Add(function->HeaderSize() + function->BodySize(),
-                function->NameSize());
-    }
-    TRACE("Size after functions: %u, %u\n", (unsigned)sizes.header_size,
-          (unsigned)sizes.body_size);
-  }
-
-  if (imports_.size() > 0) {
-    sizes.AddSection(WasmSection::Code::ImportTable, imports_.size());
-    for (auto import : imports_) {
-      sizes.Add(LEBHelper::sizeof_u32v(import.sig_index), 0);
-      sizes.Add(LEBHelper::sizeof_u32v(import.name_length), 0);
-      sizes.Add(import.name_length, 0);
-      sizes.Add(1, 0);
-    }
-    TRACE("Size after imports: %u, %u\n", (unsigned)sizes.header_size,
-          (unsigned)sizes.body_size);
-  }
-
-  if (indirect_functions_.size() > 0) {
-    sizes.AddSection(WasmSection::Code::FunctionTable,
-                     indirect_functions_.size());
-    for (auto function_index : indirect_functions_) {
-      sizes.Add(LEBHelper::sizeof_u32v(function_index), 0);
-    }
-    TRACE("Size after indirect functions: %u, %u\n",
-          (unsigned)sizes.header_size, (unsigned)sizes.body_size);
-  }
-
-  sizes.AddSection(WasmSection::Code::Memory, 0);
-  sizes.Add(kDeclMemorySize, 0);
-  TRACE("Size after memory: %u, %u\n", (unsigned)sizes.header_size,
-        (unsigned)sizes.body_size);
-
-  if (start_function_index_ >= 0) {
-    sizes.AddSection(WasmSection::Code::StartFunction, 0);
-    sizes.Add(LEBHelper::sizeof_u32v(start_function_index_), 0);
-    TRACE("Size after start: %u, %u\n", (unsigned)sizes.header_size,
-          (unsigned)sizes.body_size);
-  }
-
-  if (data_segments_.size() > 0) {
-    sizes.AddSection(WasmSection::Code::DataSegments, data_segments_.size());
-    for (auto segment : data_segments_) {
-      sizes.Add(segment->HeaderSize(), segment->BodySize());
-    }
-    TRACE("Size after data segments: %u, %u\n", (unsigned)sizes.header_size,
-          (unsigned)sizes.body_size);
-  }
-
-  if (sizes.body_size > 0) {
-    sizes.AddSection(WasmSection::Code::End, 0);
-    TRACE("Size after end: %u, %u\n", (unsigned)sizes.header_size,
-          (unsigned)sizes.body_size);
-  }
-
-  ZoneVector<uint8_t> buffer_vector(sizes.total(), zone);
-  byte* buffer = &buffer_vector[0];
-  byte* header = buffer;
-  byte* body = buffer + sizes.header_size;
-
-  // -- emit magic -------------------------------------------------------------
+  // == Emit magic =============================================================
   TRACE("emit magic\n");
-  EmitUint32(&header, kWasmMagic);
-  EmitUint32(&header, kWasmVersion);
+  buffer.write_u32(kWasmMagic);
+  buffer.write_u32(kWasmVersion);
 
-  // -- emit globals -----------------------------------------------------------
-  if (globals_.size() > 0) {
-    byte* section = EmitSection(WasmSection::Code::Globals, &header);
-    EmitVarInt(&header, globals_.size());
-
-    for (auto global : globals_) {
-      EmitVarInt(&header, 0);  // Length of the global name.
-      EmitUint8(&header, WasmOpcodes::MemTypeCodeFor(global.first));
-      EmitUint8(&header, global.second);
-    }
-    FixupSection(section, header);
-  }
-
-  // -- emit signatures --------------------------------------------------------
+  // == Emit signatures ========================================================
   if (signatures_.size() > 0) {
-    byte* section = EmitSection(WasmSection::Code::Signatures, &header);
-    EmitVarInt(&header, signatures_.size());
+    size_t start = EmitSection(WasmSection::Code::Signatures, buffer);
+    buffer.write_size(signatures_.size());
 
     for (FunctionSig* sig : signatures_) {
-      EmitUint8(&header, kWasmFunctionTypeForm);
-      EmitVarInt(&header, sig->parameter_count());
+      buffer.write_u8(kWasmFunctionTypeForm);
+      buffer.write_size(sig->parameter_count());
       for (size_t j = 0; j < sig->parameter_count(); j++) {
-        EmitUint8(&header, WasmOpcodes::LocalTypeCodeFor(sig->GetParam(j)));
+        buffer.write_u8(WasmOpcodes::LocalTypeCodeFor(sig->GetParam(j)));
       }
-      EmitVarInt(&header, sig->return_count());
+      buffer.write_size(sig->return_count());
       for (size_t j = 0; j < sig->return_count(); j++) {
-        EmitUint8(&header, WasmOpcodes::LocalTypeCodeFor(sig->GetReturn(j)));
+        buffer.write_u8(WasmOpcodes::LocalTypeCodeFor(sig->GetReturn(j)));
       }
     }
-    FixupSection(section, header);
+    FixupSection(buffer, start);
   }
 
-  // -- emit imports -----------------------------------------------------------
+  // == Emit globals ===========================================================
+  if (globals_.size() > 0) {
+    size_t start = EmitSection(WasmSection::Code::Globals, buffer);
+    buffer.write_size(globals_.size());
+
+    for (auto global : globals_) {
+      buffer.write_u32v(0);  // Length of the global name.
+      buffer.write_u8(WasmOpcodes::MemTypeCodeFor(global.first));
+      buffer.write_u8(global.second);
+    }
+    FixupSection(buffer, start);
+  }
+
+  // == Emit imports ===========================================================
   if (imports_.size() > 0) {
-    byte* section = EmitSection(WasmSection::Code::ImportTable, &header);
-    EmitVarInt(&header, imports_.size());
+    size_t start = EmitSection(WasmSection::Code::ImportTable, buffer);
+    buffer.write_size(imports_.size());
     for (auto import : imports_) {
-      EmitVarInt(&header, import.sig_index);
-      EmitVarInt(&header, import.name_length);
-      std::memcpy(header, import.name, import.name_length);
-      header += import.name_length;
-      EmitVarInt(&header, 0);
+      buffer.write_u32v(import.sig_index);
+      buffer.write_u32v(import.name_length);
+      buffer.write(reinterpret_cast<const byte*>(import.name),
+                   import.name_length);
+      buffer.write_u32v(0);
     }
-    FixupSection(section, header);
+    FixupSection(buffer, start);
   }
 
-  // -- emit functions ---------------------------------------------------------
+  // == Emit function signatures ===============================================
   if (functions_.size() > 0) {
-    byte* section = EmitSection(WasmSection::Code::OldFunctions, &header);
-    EmitVarInt(&header, functions_.size());
-
-    for (auto func : functions_) {
-      func->Serialize(buffer, &header, &body);
+    size_t start = EmitSection(WasmSection::Code::FunctionSignatures, buffer);
+    buffer.write_size(functions_.size());
+    for (auto function : functions_) {
+      function->WriteSignature(buffer);
+      if (function->exported()) exports++;
     }
-    FixupSection(section, header);
+    FixupSection(buffer, start);
   }
 
-  // -- emit function table ----------------------------------------------------
+  // == emit function table ====================================================
   if (indirect_functions_.size() > 0) {
-    byte* section = EmitSection(WasmSection::Code::FunctionTable, &header);
-    EmitVarInt(&header, indirect_functions_.size());
+    size_t start = EmitSection(WasmSection::Code::FunctionTable, buffer);
+    buffer.write_size(indirect_functions_.size());
 
     for (auto index : indirect_functions_) {
-      EmitVarInt(&header, index);
+      buffer.write_u32v(index);
     }
-    FixupSection(section, header);
+    FixupSection(buffer, start);
   }
 
-  // -- emit memory declaration ------------------------------------------------
+  // == emit memory declaration ================================================
   {
-    byte* section = EmitSection(WasmSection::Code::Memory, &header);
-    EmitVarInt(&header, 16);  // min memory size
-    EmitVarInt(&header, 16);  // max memory size
-    EmitUint8(&header, 0);    // memory export
+    size_t start = EmitSection(WasmSection::Code::Memory, buffer);
+    buffer.write_u32v(16);  // min memory size
+    buffer.write_u32v(16);  // max memory size
+    buffer.write_u8(0);     // memory export
     static_assert(kDeclMemorySize == 3, "memory size must match emit above");
-    FixupSection(section, header);
+    FixupSection(buffer, start);
   }
 
-  // -- emit start function index ----------------------------------------------
+  // == emit exports ===========================================================
+  if (exports > 0) {
+    size_t start = EmitSection(WasmSection::Code::ExportTable, buffer);
+    buffer.write_u32v(exports);
+    uint32_t index = 0;
+    for (auto function : functions_) {
+      function->WriteExport(buffer, index++);
+    }
+    FixupSection(buffer, start);
+  }
+
+  // == emit start function index ==============================================
   if (start_function_index_ >= 0) {
-    byte* section = EmitSection(WasmSection::Code::StartFunction, &header);
-    EmitVarInt(&header, start_function_index_);
-    FixupSection(section, header);
+    size_t start = EmitSection(WasmSection::Code::StartFunction, buffer);
+    buffer.write_u32v(start_function_index_);
+    FixupSection(buffer, start);
   }
 
-  // -- emit data segments -----------------------------------------------------
+  // == emit code ==============================================================
+  if (functions_.size() > 0) {
+    size_t start = EmitSection(WasmSection::Code::FunctionBodies, buffer);
+    buffer.write_size(functions_.size());
+    for (auto function : functions_) {
+      function->WriteBody(buffer);
+    }
+    FixupSection(buffer, start);
+  }
+
+  // == emit data segments =====================================================
   if (data_segments_.size() > 0) {
-    byte* section = EmitSection(WasmSection::Code::DataSegments, &header);
-    EmitVarInt(&header, data_segments_.size());
+    size_t start = EmitSection(WasmSection::Code::DataSegments, buffer);
+    buffer.write_size(data_segments_.size());
 
     for (auto segment : data_segments_) {
-      segment->Serialize(buffer, &header, &body);
+      segment->Write(buffer);
     }
-    FixupSection(section, header);
+    FixupSection(buffer, start);
   }
-
-  if (sizes.body_size > 0) {
-    byte* section = EmitSection(WasmSection::Code::End, &header);
-    FixupSection(section, header);
-  }
-
-  return new (zone) WasmModuleIndex(buffer, buffer + sizes.total());
 }
 }  // namespace wasm
 }  // namespace internal
diff --git a/src/wasm/encoder.h b/src/wasm/encoder.h
index 0f2118d..19fc397 100644
--- a/src/wasm/encoder.h
+++ b/src/wasm/encoder.h
@@ -10,6 +10,7 @@
 
 #include "src/base/smart-pointers.h"
 
+#include "src/wasm/leb-helper.h"
 #include "src/wasm/wasm-macro-gen.h"
 #include "src/wasm/wasm-module.h"
 #include "src/wasm/wasm-opcodes.h"
@@ -19,29 +20,113 @@
 namespace internal {
 namespace wasm {
 
-class WasmModuleBuilder;
-
-class WasmFunctionEncoder : public ZoneObject {
+class ZoneBuffer : public ZoneObject {
  public:
-  uint32_t HeaderSize() const;
-  uint32_t BodySize() const;
-  uint32_t NameSize() const;
-  void Serialize(byte* buffer, byte** header, byte** body) const;
+  static const uint32_t kInitialSize = 4096;
+  explicit ZoneBuffer(Zone* zone, size_t initial = kInitialSize)
+      : zone_(zone), buffer_(reinterpret_cast<byte*>(zone->New(initial))) {
+    pos_ = buffer_;
+    end_ = buffer_ + initial;
+  }
+
+  void write_u8(uint8_t x) {
+    EnsureSpace(1);
+    *(pos_++) = x;
+  }
+
+  void write_u16(uint16_t x) {
+    EnsureSpace(2);
+#if V8_TARGET_LITTLE_ENDIAN
+    WriteUnalignedUInt16(pos_, x);
+#else
+    pos_[0] = x & 0xff;
+    pos_[1] = (x >> 8) & 0xff;
+#endif
+    pos_ += 2;
+  }
+
+  void write_u32(uint32_t x) {
+    EnsureSpace(4);
+#if V8_TARGET_LITTLE_ENDIAN
+    WriteUnalignedUInt32(pos_, x);
+#else
+    pos_[0] = x & 0xff;
+    pos_[1] = (x >> 8) & 0xff;
+    pos_[2] = (x >> 16) & 0xff;
+    pos_[3] = (x >> 24) & 0xff;
+#endif
+    pos_ += 4;
+  }
+
+  void write_u32v(uint32_t val) {
+    EnsureSpace(kMaxVarInt32Size);
+    LEBHelper::write_u32v(&pos_, val);
+  }
+
+  void write_size(size_t val) {
+    EnsureSpace(kMaxVarInt32Size);
+    DCHECK_EQ(val, static_cast<uint32_t>(val));
+    LEBHelper::write_u32v(&pos_, static_cast<uint32_t>(val));
+  }
+
+  void write(const byte* data, size_t size) {
+    EnsureSpace(size);
+    memcpy(pos_, data, size);
+    pos_ += size;
+  }
+
+  size_t reserve_u32v() {
+    size_t off = offset();
+    EnsureSpace(kMaxVarInt32Size);
+    pos_ += kMaxVarInt32Size;
+    return off;
+  }
+
+  // Patch a (padded) u32v at the given offset to be the given value.
+  void patch_u32v(size_t offset, uint32_t val) {
+    byte* ptr = buffer_ + offset;
+    for (size_t pos = 0; pos != kPaddedVarInt32Size; ++pos) {
+      uint32_t next = val >> 7;
+      byte out = static_cast<byte>(val & 0x7f);
+      if (pos != kPaddedVarInt32Size - 1) {
+        *(ptr++) = 0x80 | out;
+        val = next;
+      } else {
+        *(ptr++) = out;
+      }
+    }
+  }
+
+  size_t offset() { return static_cast<size_t>(pos_ - buffer_); }
+  size_t size() { return static_cast<size_t>(pos_ - buffer_); }
+  const byte* begin() { return buffer_; }
+  const byte* end() { return pos_; }
+
+  void EnsureSpace(size_t size) {
+    if ((pos_ + size) > end_) {
+      size_t new_size = 4096 + (end_ - buffer_) * 3;
+      byte* new_buffer = reinterpret_cast<byte*>(zone_->New(new_size));
+      memcpy(new_buffer, buffer_, (pos_ - buffer_));
+      pos_ = new_buffer + (pos_ - buffer_);
+      buffer_ = new_buffer;
+      end_ = new_buffer + new_size;
+    }
+  }
+
+  byte** pos_ptr() { return &pos_; }
 
  private:
-  WasmFunctionEncoder(Zone* zone, LocalDeclEncoder locals, bool exported);
-  friend class WasmFunctionBuilder;
-  uint32_t signature_index_;
-  LocalDeclEncoder locals_;
-  bool exported_;
-  ZoneVector<uint8_t> body_;
-  ZoneVector<char> name_;
-
-  bool HasName() const { return exported_ && name_.size() > 0; }
+  Zone* zone_;
+  byte* buffer_;
+  byte* pos_;
+  byte* end_;
 };
 
+class WasmModuleBuilder;
+
 class WasmFunctionBuilder : public ZoneObject {
  public:
+  // Building methods.
   void SetSignature(FunctionSig* sig);
   uint32_t AddLocal(LocalType type);
   void EmitVarInt(uint32_t val);
@@ -53,71 +138,49 @@
   void EmitWithU8(WasmOpcode opcode, const byte immediate);
   void EmitWithU8U8(WasmOpcode opcode, const byte imm1, const byte imm2);
   void EmitWithVarInt(WasmOpcode opcode, uint32_t immediate);
-  void Exported(uint8_t flag);
+  void SetExported();
   void SetName(const char* name, int name_length);
-  WasmFunctionEncoder* Build(Zone* zone, WasmModuleBuilder* mb) const;
+  bool exported() { return exported_; }
+
+  // Writing methods.
+  void WriteSignature(ZoneBuffer& buffer) const;
+  void WriteExport(ZoneBuffer& buffer, uint32_t func_index) const;
+  void WriteBody(ZoneBuffer& buffer) const;
 
  private:
-  explicit WasmFunctionBuilder(Zone* zone);
+  explicit WasmFunctionBuilder(WasmModuleBuilder* builder);
   friend class WasmModuleBuilder;
+  WasmModuleBuilder* builder_;
   LocalDeclEncoder locals_;
-  uint8_t exported_;
+  uint32_t signature_index_;
+  bool exported_;
   ZoneVector<uint8_t> body_;
   ZoneVector<char> name_;
-  void IndexVars(WasmFunctionEncoder* e, uint32_t* var_index) const;
 };
 
+// TODO(titzer): kill!
 class WasmDataSegmentEncoder : public ZoneObject {
  public:
   WasmDataSegmentEncoder(Zone* zone, const byte* data, uint32_t size,
                          uint32_t dest);
-  uint32_t HeaderSize() const;
-  uint32_t BodySize() const;
-  void Serialize(byte* buffer, byte** header, byte** body) const;
+  void Write(ZoneBuffer& buffer) const;
 
  private:
   ZoneVector<byte> data_;
   uint32_t dest_;
 };
 
-class WasmModuleIndex : public ZoneObject {
- public:
-  const byte* Begin() const { return begin_; }
-  const byte* End() const { return end_; }
-
- private:
-  friend class WasmModuleWriter;
-  WasmModuleIndex(const byte* begin, const byte* end)
-      : begin_(begin), end_(end) {}
-  const byte* begin_;
-  const byte* end_;
-};
-
 struct WasmFunctionImport {
   uint32_t sig_index;
   const char* name;
   int name_length;
 };
 
-class WasmModuleWriter : public ZoneObject {
- public:
-  WasmModuleIndex* WriteTo(Zone* zone) const;
-
- private:
-  friend class WasmModuleBuilder;
-  explicit WasmModuleWriter(Zone* zone);
-  ZoneVector<WasmFunctionImport> imports_;
-  ZoneVector<WasmFunctionEncoder*> functions_;
-  ZoneVector<WasmDataSegmentEncoder*> data_segments_;
-  ZoneVector<FunctionSig*> signatures_;
-  ZoneVector<uint32_t> indirect_functions_;
-  ZoneVector<std::pair<MachineType, bool>> globals_;
-  int start_function_index_;
-};
-
 class WasmModuleBuilder : public ZoneObject {
  public:
   explicit WasmModuleBuilder(Zone* zone);
+
+  // Building methods.
   uint32_t AddFunction();
   uint32_t AddGlobal(MachineType type, bool exported);
   WasmFunctionBuilder* FunctionAt(size_t index);
@@ -126,13 +189,17 @@
   void AddIndirectFunction(uint32_t index);
   void MarkStartFunction(uint32_t index);
   uint32_t AddImport(const char* name, int name_length, FunctionSig* sig);
-  WasmModuleWriter* Build(Zone* zone);
+
+  // Writing methods.
+  void WriteTo(ZoneBuffer& buffer) const;
 
   struct CompareFunctionSigs {
     bool operator()(FunctionSig* a, FunctionSig* b) const;
   };
   typedef ZoneMap<FunctionSig*, uint32_t, CompareFunctionSigs> SignatureMap;
 
+  Zone* zone() { return zone_; }
+
  private:
   Zone* zone_;
   ZoneVector<FunctionSig*> signatures_;
diff --git a/src/wasm/leb-helper.h b/src/wasm/leb-helper.h
index 7ba244d..0e4ba34 100644
--- a/src/wasm/leb-helper.h
+++ b/src/wasm/leb-helper.h
@@ -9,6 +9,9 @@
 namespace internal {
 namespace wasm {
 
+static const size_t kPaddedVarInt32Size = 5;
+static const size_t kMaxVarInt32Size = 5;
+
 class LEBHelper {
  public:
   // Write a 32-bit unsigned LEB to {dest}, updating {dest} to point after
diff --git a/src/wasm/module-decoder.cc b/src/wasm/module-decoder.cc
index f7d26a5..c356eb8 100644
--- a/src/wasm/module-decoder.cc
+++ b/src/wasm/module-decoder.cc
@@ -25,6 +25,8 @@
 #define TRACE(...)
 #endif
 
+namespace {
+
 // The main logic for decoding the bytes of a module.
 class ModuleDecoder : public Decoder {
  public:
@@ -108,9 +110,7 @@
       pos = pc_;
 
       // Read the section name.
-      int string_leb_length = 0;
-      uint32_t string_length =
-          consume_u32v(&string_leb_length, "section name length");
+      uint32_t string_length = consume_u32v("section name length");
       const byte* section_name_start = pc_;
       consume_bytes(string_length);
       if (failed()) {
@@ -118,13 +118,15 @@
         break;
       }
 
+      TRACE("  +%d  section name        : \"%.*s\"\n",
+            static_cast<int>(section_name_start - start_),
+            string_length < 20 ? string_length : 20, section_name_start);
+
       WasmSection::Code section =
           WasmSection::lookup(section_name_start, string_length);
 
       // Read and check the section size.
-      int section_leb_length = 0;
-      uint32_t section_length =
-          consume_u32v(&section_leb_length, "section length");
+      uint32_t section_length = consume_u32v("section length");
       if (!checkAvailable(section_length)) {
         // The section would extend beyond the end of the module.
         break;
@@ -140,18 +142,16 @@
           limit_ = pc_;
           break;
         case WasmSection::Code::Memory: {
-          int length;
-          module->min_mem_pages = consume_u32v(&length, "min memory");
-          module->max_mem_pages = consume_u32v(&length, "max memory");
+          module->min_mem_pages = consume_u32v("min memory");
+          module->max_mem_pages = consume_u32v("max memory");
           module->mem_export = consume_u8("export memory") != 0;
           break;
         }
         case WasmSection::Code::Signatures: {
-          int length;
-          uint32_t signatures_count = consume_u32v(&length, "signatures count");
+          uint32_t signatures_count = consume_u32v("signatures count");
           module->signatures.reserve(SafeReserve(signatures_count));
           // Decode signatures.
-          for (uint32_t i = 0; i < signatures_count; i++) {
+          for (uint32_t i = 0; i < signatures_count; ++i) {
             if (failed()) break;
             TRACE("DecodeSignature[%d] module+%d\n", i,
                   static_cast<int>(pc_ - start_));
@@ -161,37 +161,33 @@
           break;
         }
         case WasmSection::Code::FunctionSignatures: {
-          int length;
-          uint32_t functions_count = consume_u32v(&length, "functions count");
+          uint32_t functions_count = consume_u32v("functions count");
           module->functions.reserve(SafeReserve(functions_count));
-          for (uint32_t i = 0; i < functions_count; i++) {
+          for (uint32_t i = 0; i < functions_count; ++i) {
             module->functions.push_back({nullptr,  // sig
                                          i,        // func_index
                                          0,        // sig_index
                                          0,        // name_offset
                                          0,        // name_length
                                          0,        // code_start_offset
-                                         0,        // code_end_offset
-                                         false});  // exported
+                                         0});      // code_end_offset
             WasmFunction* function = &module->functions.back();
             function->sig_index = consume_sig_index(module, &function->sig);
           }
           break;
         }
         case WasmSection::Code::FunctionBodies: {
-          int length;
           const byte* pos = pc_;
-          uint32_t functions_count = consume_u32v(&length, "functions count");
+          uint32_t functions_count = consume_u32v("functions count");
           if (functions_count != module->functions.size()) {
             error(pos, pos, "function body count %u mismatch (%u expected)",
                   functions_count,
                   static_cast<uint32_t>(module->functions.size()));
             break;
           }
-          for (uint32_t i = 0; i < functions_count; i++) {
+          for (uint32_t i = 0; i < functions_count; ++i) {
             WasmFunction* function = &module->functions[i];
-            int length;
-            uint32_t size = consume_u32v(&length, "body size");
+            uint32_t size = consume_u32v("body size");
             function->code_start_offset = pc_offset();
             function->code_end_offset = pc_offset() + size;
 
@@ -204,48 +200,9 @@
           }
           break;
         }
-        case WasmSection::Code::OldFunctions: {
-          int length;
-          uint32_t functions_count = consume_u32v(&length, "functions count");
-          module->functions.reserve(SafeReserve(functions_count));
-          // Set up module environment for verification.
-          ModuleEnv menv;
-          menv.module = module;
-          menv.instance = nullptr;
-          menv.origin = origin_;
-          // Decode functions.
-          for (uint32_t i = 0; i < functions_count; i++) {
-            if (failed()) break;
-            TRACE("DecodeFunction[%d] module+%d\n", i,
-                  static_cast<int>(pc_ - start_));
-
-            module->functions.push_back({nullptr,  // sig
-                                         i,        // func_index
-                                         0,        // sig_index
-                                         0,        // name_offset
-                                         0,        // name_length
-                                         0,        // code_start_offset
-                                         0,        // code_end_offset
-                                         false});  // exported
-            WasmFunction* function = &module->functions.back();
-            DecodeFunctionInModule(module, function, false);
-          }
-          if (ok() && verify_functions) {
-            for (uint32_t i = 0; i < functions_count; i++) {
-              if (failed()) break;
-              WasmFunction* function = &module->functions[i];
-              VerifyFunctionBody(i, &menv, function);
-              if (result_.failed()) {
-                error(result_.error_pc, result_.error_msg.get());
-              }
-            }
-          }
-          break;
-        }
         case WasmSection::Code::Names: {
-          int length;
           const byte* pos = pc_;
-          uint32_t functions_count = consume_u32v(&length, "functions count");
+          uint32_t functions_count = consume_u32v("functions count");
           if (functions_count != module->functions.size()) {
             error(pos, pos, "function name count %u mismatch (%u expected)",
                   functions_count,
@@ -253,13 +210,12 @@
             break;
           }
 
-          for (uint32_t i = 0; i < functions_count; i++) {
+          for (uint32_t i = 0; i < functions_count; ++i) {
             WasmFunction* function = &module->functions[i];
             function->name_offset =
                 consume_string(&function->name_length, false);
 
-            uint32_t local_names_count =
-                consume_u32v(&length, "local names count");
+            uint32_t local_names_count = consume_u32v("local names count");
             for (uint32_t j = 0; j < local_names_count; j++) {
               uint32_t unused = 0;
               uint32_t offset = consume_string(&unused, false);
@@ -270,11 +226,10 @@
           break;
         }
         case WasmSection::Code::Globals: {
-          int length;
-          uint32_t globals_count = consume_u32v(&length, "globals count");
+          uint32_t globals_count = consume_u32v("globals count");
           module->globals.reserve(SafeReserve(globals_count));
           // Decode globals.
-          for (uint32_t i = 0; i < globals_count; i++) {
+          for (uint32_t i = 0; i < globals_count; ++i) {
             if (failed()) break;
             TRACE("DecodeGlobal[%d] module+%d\n", i,
                   static_cast<int>(pc_ - start_));
@@ -285,12 +240,10 @@
           break;
         }
         case WasmSection::Code::DataSegments: {
-          int length;
-          uint32_t data_segments_count =
-              consume_u32v(&length, "data segments count");
+          uint32_t data_segments_count = consume_u32v("data segments count");
           module->data_segments.reserve(SafeReserve(data_segments_count));
           // Decode data segments.
-          for (uint32_t i = 0; i < data_segments_count; i++) {
+          for (uint32_t i = 0; i < data_segments_count; ++i) {
             if (failed()) break;
             TRACE("DecodeDataSegment[%d] module+%d\n", i,
                   static_cast<int>(pc_ - start_));
@@ -303,25 +256,39 @@
           }
           break;
         }
+        case WasmSection::Code::FunctionTablePad: {
+          if (!FLAG_wasm_jit_prototype) {
+            error("FunctionTablePad section without jiting enabled");
+          }
+          // An indirect function table requires functions first.
+          module->indirect_table_size = consume_u32v("indirect entry count");
+          if (module->indirect_table_size > 0 &&
+              module->indirect_table_size < module->function_table.size()) {
+            error("more predefined indirect entries than table can hold");
+          }
+          break;
+        }
         case WasmSection::Code::FunctionTable: {
           // An indirect function table requires functions first.
           CheckForFunctions(module, section);
-          int length;
-          uint32_t function_table_count =
-              consume_u32v(&length, "function table count");
+          uint32_t function_table_count = consume_u32v("function table count");
           module->function_table.reserve(SafeReserve(function_table_count));
           // Decode function table.
-          for (uint32_t i = 0; i < function_table_count; i++) {
+          for (uint32_t i = 0; i < function_table_count; ++i) {
             if (failed()) break;
             TRACE("DecodeFunctionTable[%d] module+%d\n", i,
                   static_cast<int>(pc_ - start_));
-            uint16_t index = consume_u32v(&length);
+            uint16_t index = consume_u32v();
             if (index >= module->functions.size()) {
               error(pc_ - 2, "invalid function index");
               break;
             }
             module->function_table.push_back(index);
           }
+          if (module->indirect_table_size > 0 &&
+              module->indirect_table_size < module->function_table.size()) {
+            error("more predefined indirect entries than table can hold");
+          }
           break;
         }
         case WasmSection::Code::StartFunction: {
@@ -341,12 +308,10 @@
           break;
         }
         case WasmSection::Code::ImportTable: {
-          int length;
-          uint32_t import_table_count =
-              consume_u32v(&length, "import table count");
+          uint32_t import_table_count = consume_u32v("import table count");
           module->import_table.reserve(SafeReserve(import_table_count));
           // Decode import table.
-          for (uint32_t i = 0; i < import_table_count; i++) {
+          for (uint32_t i = 0; i < import_table_count; ++i) {
             if (failed()) break;
             TRACE("DecodeImportTable[%d] module+%d\n", i,
                   static_cast<int>(pc_ - start_));
@@ -374,12 +339,10 @@
         case WasmSection::Code::ExportTable: {
           // Declares an export table.
           CheckForFunctions(module, section);
-          int length;
-          uint32_t export_table_count =
-              consume_u32v(&length, "export table count");
+          uint32_t export_table_count = consume_u32v("export table count");
           module->export_table.reserve(SafeReserve(export_table_count));
           // Decode export table.
-          for (uint32_t i = 0; i < export_table_count; i++) {
+          for (uint32_t i = 0; i < export_table_count; ++i) {
             if (failed()) break;
             TRACE("DecodeExportTable[%d] module+%d\n", i,
                   static_cast<int>(pc_ - start_));
@@ -393,6 +356,32 @@
             exp->func_index = consume_func_index(module, &func);
             exp->name_offset = consume_string(&exp->name_length, true);
           }
+          // Check for duplicate exports.
+          if (ok() && module->export_table.size() > 1) {
+            std::vector<WasmExport> sorted_exports(module->export_table);
+            const byte* base = start_;
+            auto cmp_less = [base](const WasmExport& a, const WasmExport& b) {
+              // Return true if a < b.
+              uint32_t len = a.name_length;
+              if (len != b.name_length) return len < b.name_length;
+              return memcmp(base + a.name_offset, base + b.name_offset, len) <
+                     0;
+            };
+            std::stable_sort(sorted_exports.begin(), sorted_exports.end(),
+                             cmp_less);
+            auto it = sorted_exports.begin();
+            WasmExport* last = &*it++;
+            for (auto end = sorted_exports.end(); it != end; last = &*it++) {
+              DCHECK(!cmp_less(*it, *last));  // Vector must be sorted.
+              if (!cmp_less(*last, *it)) {
+                const byte* pc = start_ + it->name_offset;
+                error(pc, pc,
+                      "Duplicate export name '%.*s' for functions %d and %d",
+                      it->name_length, pc, last->func_index, it->func_index);
+                break;
+              }
+            }
+          }
           break;
         }
         case WasmSection::Code::Max:
@@ -419,7 +408,9 @@
     }
 
   done:
-    ModuleResult result = toResult(module);
+    if (ok()) CalculateGlobalsOffsets(module);
+    const WasmModule* finished_module = module;
+    ModuleResult result = toResult(finished_module);
     if (FLAG_dump_wasm_module) {
       DumpModule(module, result);
     }
@@ -462,7 +453,6 @@
     function->name_length = 0;                // ---- name length
     function->code_start_offset = off(pc_);   // ---- code start
     function->code_end_offset = off(limit_);  // ---- code end
-    function->exported = false;               // ---- exported
 
     if (ok()) VerifyFunctionBody(0, module_env, function);
 
@@ -489,53 +479,15 @@
   // Decodes a single global entry inside a module starting at {pc_}.
   void DecodeGlobalInModule(WasmGlobal* global) {
     global->name_offset = consume_string(&global->name_length, false);
-    DCHECK(unibrow::Utf8::Validate(start_ + global->name_offset,
-                                   global->name_length));
+    if (!unibrow::Utf8::Validate(start_ + global->name_offset,
+                                 global->name_length)) {
+      error("global name is not valid utf8");
+    }
     global->type = mem_type();
     global->offset = 0;
     global->exported = consume_u8("exported") != 0;
   }
 
-  // Decodes a single function entry inside a module starting at {pc_}.
-  // TODO(titzer): legacy function body; remove
-  void DecodeFunctionInModule(WasmModule* module, WasmFunction* function,
-                              bool verify_body = true) {
-    byte decl_bits = consume_u8("function decl");
-
-    const byte* sigpos = pc_;
-    function->sig_index = consume_u16("signature index");
-
-    if (function->sig_index >= module->signatures.size()) {
-      return error(sigpos, "invalid signature index");
-    } else {
-      function->sig = module->signatures[function->sig_index];
-    }
-
-    TRACE("  +%d  <function attributes:%s%s>\n", static_cast<int>(pc_ - start_),
-          decl_bits & kDeclFunctionName ? " name" : "",
-          decl_bits & kDeclFunctionExport ? " exported" : "");
-
-    function->exported = decl_bits & kDeclFunctionExport;
-
-    if (decl_bits & kDeclFunctionName) {
-      function->name_offset =
-          consume_string(&function->name_length, function->exported);
-    }
-
-    uint16_t size = consume_u16("body size");
-    if (ok()) {
-      if ((pc_ + size) > limit_) {
-        return error(pc_, limit_,
-                     "expected %d bytes for function body, fell off end", size);
-      }
-      function->code_start_offset = static_cast<uint32_t>(pc_ - start_);
-      function->code_end_offset = function->code_start_offset + size;
-      TRACE("  +%d  %-20s: (%d bytes)\n", static_cast<int>(pc_ - start_),
-            "function body", size);
-      pc_ += size;
-    }
-  }
-
   bool IsWithinLimit(uint32_t limit, uint32_t offset, uint32_t size) {
     if (offset > limit) return false;
     if ((offset + size) < offset) return false;  // overflow
@@ -545,9 +497,8 @@
   // Decodes a single data segment entry inside a module starting at {pc_}.
   void DecodeDataSegmentInModule(WasmModule* module, WasmDataSegment* segment) {
     const byte* start = pc_;
-    int length;
-    segment->dest_addr = consume_u32v(&length, "destination");
-    segment->source_size = consume_u32v(&length, "source size");
+    segment->dest_addr = consume_u32v("destination");
+    segment->source_size = consume_u32v("source size");
     segment->source_offset = static_cast<uint32_t>(pc_ - start_);
     segment->init = true;
 
@@ -570,6 +521,22 @@
     consume_bytes(segment->source_size);
   }
 
+  // Calculate individual global offsets and total size of globals table.
+  void CalculateGlobalsOffsets(WasmModule* module) {
+    uint32_t offset = 0;
+    if (module->globals.size() == 0) {
+      module->globals_size = 0;
+      return;
+    }
+    for (WasmGlobal& global : module->globals) {
+      byte size = WasmOpcodes::MemSize(global.type);
+      offset = (offset + size - 1) & ~(size - 1);  // align
+      global.offset = offset;
+      offset += size;
+    }
+    module->globals_size = offset;
+  }
+
   // Verifies the body (code) of a given function.
   void VerifyFunctionBody(uint32_t func_num, ModuleEnv* menv,
                           WasmFunction* function) {
@@ -613,8 +580,7 @@
   // Reads a length-prefixed string, checking that it is within bounds. Returns
   // the offset of the string, and the length as an out parameter.
   uint32_t consume_string(uint32_t* length, bool validate_utf8) {
-    int varint_length;
-    *length = consume_u32v(&varint_length, "string length");
+    *length = consume_u32v("string length");
     uint32_t offset = pc_offset();
     TRACE("  +%u  %-20s: (%u bytes)\n", offset, "string", *length);
     if (validate_utf8 && !unibrow::Utf8::Validate(pc_, *length)) {
@@ -626,8 +592,7 @@
 
   uint32_t consume_sig_index(WasmModule* module, FunctionSig** sig) {
     const byte* pos = pc_;
-    int length;
-    uint32_t sig_index = consume_u32v(&length, "signature index");
+    uint32_t sig_index = consume_u32v("signature index");
     if (sig_index >= module->signatures.size()) {
       error(pos, pos, "signature index %u out of bounds (%d signatures)",
             sig_index, static_cast<int>(module->signatures.size()));
@@ -640,8 +605,7 @@
 
   uint32_t consume_func_index(WasmModule* module, WasmFunction** func) {
     const byte* pos = pc_;
-    int length;
-    uint32_t func_index = consume_u32v(&length, "function index");
+    uint32_t func_index = consume_u32v("function index");
     if (func_index >= module->functions.size()) {
       error(pos, pos, "function index %u out of bounds (%d functions)",
             func_index, static_cast<int>(module->functions.size()));
@@ -698,6 +662,8 @@
         return MachineType::Float32();
       case kMemF64:
         return MachineType::Float64();
+      case kMemS128:
+        return MachineType::Simd128();
       default:
         error(pc_ - 1, "invalid memory type");
         return MachineType::None();
@@ -713,11 +679,10 @@
             kWasmFunctionTypeForm, form);
       return nullptr;
     }
-    int length;
     // parse parameter types
-    uint32_t param_count = consume_u32v(&length, "param count");
+    uint32_t param_count = consume_u32v("param count");
     std::vector<LocalType> params;
-    for (uint32_t i = 0; i < param_count; i++) {
+    for (uint32_t i = 0; i < param_count; ++i) {
       LocalType param = consume_local_type();
       if (param == kAstStmt) error(pc_ - 1, "invalid void parameter type");
       params.push_back(param);
@@ -725,14 +690,14 @@
 
     // parse return types
     const byte* pt = pc_;
-    uint32_t return_count = consume_u32v(&length, "return count");
+    uint32_t return_count = consume_u32v("return count");
     if (return_count > kMaxReturnCount) {
       error(pt, pt, "return count of %u exceeds maximum of %u", return_count,
             kMaxReturnCount);
       return nullptr;
     }
     std::vector<LocalType> returns;
-    for (uint32_t i = 0; i < return_count; i++) {
+    for (uint32_t i = 0; i < return_count; ++i) {
       LocalType ret = consume_local_type();
       if (ret == kAstStmt) error(pc_ - 1, "invalid void return type");
       returns.push_back(ret);
@@ -742,8 +707,8 @@
     LocalType* buffer =
         module_zone->NewArray<LocalType>(param_count + return_count);
     uint32_t b = 0;
-    for (uint32_t i = 0; i < return_count; i++) buffer[b++] = returns[i];
-    for (uint32_t i = 0; i < param_count; i++) buffer[b++] = params[i];
+    for (uint32_t i = 0; i < return_count; ++i) buffer[b++] = returns[i];
+    for (uint32_t i = 0; i < param_count; ++i) buffer[b++] = params[i];
 
     return new (module_zone) FunctionSig(return_count, param_count, buffer);
   }
@@ -775,6 +740,41 @@
   }
 };
 
+Vector<const byte> FindSection(const byte* module_start, const byte* module_end,
+                               WasmSection::Code code) {
+  Decoder decoder(module_start, module_end);
+
+  uint32_t magic_word = decoder.consume_u32("wasm magic");
+  if (magic_word != kWasmMagic) decoder.error("wrong magic word");
+
+  uint32_t magic_version = decoder.consume_u32("wasm version");
+  if (magic_version != kWasmVersion) decoder.error("wrong wasm version");
+
+  while (decoder.more() && decoder.ok()) {
+    // Read the section name.
+    uint32_t string_length = decoder.consume_u32v("section name length");
+    const byte* section_name_start = decoder.pc();
+    decoder.consume_bytes(string_length);
+    if (decoder.failed()) break;
+
+    WasmSection::Code section =
+        WasmSection::lookup(section_name_start, string_length);
+
+    // Read and check the section size.
+    uint32_t section_length = decoder.consume_u32v("section length");
+
+    const byte* section_start = decoder.pc();
+    decoder.consume_bytes(section_length);
+    if (section == code && decoder.ok()) {
+      return Vector<const uint8_t>(section_start, section_length);
+    }
+  }
+
+  return Vector<const uint8_t>();
+}
+
+}  // namespace
+
 ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
                               const byte* module_start, const byte* module_end,
                               bool verify_functions, ModuleOrigin origin) {
@@ -818,6 +818,33 @@
   ModuleDecoder decoder(zone, function_start, function_end, kWasmOrigin);
   return decoder.DecodeSingleFunction(module_env, function);
 }
+
+FunctionOffsetsResult DecodeWasmFunctionOffsets(const byte* module_start,
+                                                const byte* module_end) {
+  Vector<const byte> code_section =
+      FindSection(module_start, module_end, WasmSection::Code::FunctionBodies);
+  Decoder decoder(code_section.start(), code_section.end());
+  if (!code_section.start()) decoder.error("no code section");
+
+  uint32_t functions_count = decoder.consume_u32v("functions count");
+  FunctionOffsets table;
+  // Take care of invalid input here.
+  if (functions_count < static_cast<unsigned>(code_section.length()) / 2)
+    table.reserve(functions_count);
+  int section_offset = static_cast<int>(code_section.start() - module_start);
+  DCHECK_LE(0, section_offset);
+  for (uint32_t i = 0; i < functions_count && decoder.ok(); ++i) {
+    uint32_t size = decoder.consume_u32v("body size");
+    int offset = static_cast<int>(section_offset + decoder.pc_offset());
+    table.push_back(std::make_pair(offset, static_cast<int>(size)));
+    DCHECK(table.back().first >= 0 && table.back().second >= 0);
+    decoder.consume_bytes(size);
+  }
+  if (decoder.more()) decoder.error("unexpected additional bytes");
+
+  return decoder.toResult(std::move(table));
+}
+
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/wasm/module-decoder.h b/src/wasm/module-decoder.h
index 00a9b87..dd6bd3b 100644
--- a/src/wasm/module-decoder.h
+++ b/src/wasm/module-decoder.h
@@ -26,6 +26,13 @@
 FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone, ModuleEnv* env,
                                   const byte* function_start,
                                   const byte* function_end);
+
+// Extracts the function offset table from the wasm module bytes.
+// Returns a vector with <offset, length> entries, or failure if the wasm bytes
+// are detected as invalid. Note that this validation is not complete.
+FunctionOffsetsResult DecodeWasmFunctionOffsets(const byte* module_start,
+                                                const byte* module_end);
+
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/wasm/switch-logic.cc b/src/wasm/switch-logic.cc
index f8e3f0d..9ebc0b3 100644
--- a/src/wasm/switch-logic.cc
+++ b/src/wasm/switch-logic.cc
@@ -34,7 +34,7 @@
   }
   std::sort(cases->begin(), cases->end());
   ZoneVector<size_t> table_breaks(zone);
-  for (size_t i = 1; i < cases->size(); i++) {
+  for (size_t i = 1; i < cases->size(); ++i) {
     if (cases->at(i) - cases->at(i - 1) > max_distance) {
       table_breaks.push_back(i);
     }
@@ -42,7 +42,7 @@
   table_breaks.push_back(cases->size());
   ZoneVector<CaseNode*> nodes(zone);
   size_t curr_pos = 0;
-  for (size_t i = 0; i < table_breaks.size(); i++) {
+  for (size_t i = 0; i < table_breaks.size(); ++i) {
     size_t break_pos = table_breaks[i];
     if (break_pos - curr_pos >= min_size) {
       int begin = cases->at(curr_pos);
diff --git a/src/wasm/wasm-debug.cc b/src/wasm/wasm-debug.cc
new file mode 100644
index 0000000..5b9c2cb
--- /dev/null
+++ b/src/wasm/wasm-debug.cc
@@ -0,0 +1,185 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-debug.h"
+
+#include "src/assert-scope.h"
+#include "src/debug/debug.h"
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-module.h"
+
+using namespace v8::internal;
+using namespace v8::internal::wasm;
+
+namespace {
+
+enum {
+  kWasmDebugInfoWasmObj,
+  kWasmDebugInfoWasmBytesHash,
+  kWasmDebugInfoFunctionByteOffsets,
+  kWasmDebugInfoNumEntries
+};
+
+ByteArray *GetOrCreateFunctionOffsetTable(Handle<WasmDebugInfo> debug_info) {
+  Object *offset_table = debug_info->get(kWasmDebugInfoFunctionByteOffsets);
+  Isolate *isolate = debug_info->GetIsolate();
+  if (!offset_table->IsUndefined(isolate)) return ByteArray::cast(offset_table);
+
+  FunctionOffsetsResult function_offsets;
+  {
+    DisallowHeapAllocation no_gc;
+    SeqOneByteString *wasm_bytes =
+        wasm::GetWasmBytes(debug_info->wasm_object());
+    const byte *bytes_start = wasm_bytes->GetChars();
+    const byte *bytes_end = bytes_start + wasm_bytes->length();
+    function_offsets = wasm::DecodeWasmFunctionOffsets(bytes_start, bytes_end);
+  }
+  DCHECK(function_offsets.ok());
+  size_t array_size = 2 * kIntSize * function_offsets.val.size();
+  CHECK_LE(array_size, static_cast<size_t>(kMaxInt));
+  ByteArray *arr =
+      *isolate->factory()->NewByteArray(static_cast<int>(array_size));
+  int idx = 0;
+  for (std::pair<int, int> p : function_offsets.val) {
+    arr->set_int(idx++, p.first);
+    arr->set_int(idx++, p.second);
+  }
+  DCHECK_EQ(arr->length(), idx * kIntSize);
+  debug_info->set(kWasmDebugInfoFunctionByteOffsets, arr);
+
+  return arr;
+}
+
+std::pair<int, int> GetFunctionOffsetAndLength(Handle<WasmDebugInfo> debug_info,
+                                               int func_index) {
+  ByteArray *arr = GetOrCreateFunctionOffsetTable(debug_info);
+  DCHECK(func_index >= 0 && func_index < arr->length() / kIntSize / 2);
+
+  int offset = arr->get_int(2 * func_index);
+  int length = arr->get_int(2 * func_index + 1);
+  // Assert that it's distinguishable from the "illegal function index" return.
+  DCHECK(offset > 0 && length > 0);
+  return {offset, length};
+}
+
+Vector<const uint8_t> GetFunctionBytes(Handle<WasmDebugInfo> debug_info,
+                                       int func_index) {
+  SeqOneByteString *module_bytes =
+      wasm::GetWasmBytes(debug_info->wasm_object());
+  std::pair<int, int> offset_and_length =
+      GetFunctionOffsetAndLength(debug_info, func_index);
+  return Vector<const uint8_t>(
+      module_bytes->GetChars() + offset_and_length.first,
+      offset_and_length.second);
+}
+
+}  // namespace
+
+Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<JSObject> wasm) {
+  Isolate *isolate = wasm->GetIsolate();
+  Factory *factory = isolate->factory();
+  Handle<FixedArray> arr =
+      factory->NewFixedArray(kWasmDebugInfoNumEntries, TENURED);
+  arr->set(kWasmDebugInfoWasmObj, *wasm);
+  int hash = 0;
+  Handle<SeqOneByteString> wasm_bytes(GetWasmBytes(*wasm), isolate);
+  {
+    DisallowHeapAllocation no_gc;
+    hash = StringHasher::HashSequentialString(
+        wasm_bytes->GetChars(), wasm_bytes->length(), kZeroHashSeed);
+  }
+  Handle<Object> hash_obj = factory->NewNumberFromInt(hash, TENURED);
+  arr->set(kWasmDebugInfoWasmBytesHash, *hash_obj);
+
+  return Handle<WasmDebugInfo>::cast(arr);
+}
+
+bool WasmDebugInfo::IsDebugInfo(Object *object) {
+  if (!object->IsFixedArray()) return false;
+  FixedArray *arr = FixedArray::cast(object);
+  Isolate *isolate = arr->GetIsolate();
+  return arr->length() == kWasmDebugInfoNumEntries &&
+         IsWasmObject(arr->get(kWasmDebugInfoWasmObj)) &&
+         arr->get(kWasmDebugInfoWasmBytesHash)->IsNumber() &&
+         (arr->get(kWasmDebugInfoFunctionByteOffsets)->IsUndefined(isolate) ||
+          arr->get(kWasmDebugInfoFunctionByteOffsets)->IsByteArray());
+}
+
+WasmDebugInfo *WasmDebugInfo::cast(Object *object) {
+  DCHECK(IsDebugInfo(object));
+  return reinterpret_cast<WasmDebugInfo *>(object);
+}
+
+JSObject *WasmDebugInfo::wasm_object() {
+  return JSObject::cast(get(kWasmDebugInfoWasmObj));
+}
+
+bool WasmDebugInfo::SetBreakPoint(int byte_offset) {
+  // TODO(clemensh): Implement this.
+  return false;
+}
+
+Handle<String> WasmDebugInfo::DisassembleFunction(
+    Handle<WasmDebugInfo> debug_info, int func_index) {
+  std::ostringstream disassembly_os;
+
+  {
+    Vector<const uint8_t> bytes_vec = GetFunctionBytes(debug_info, func_index);
+    DisallowHeapAllocation no_gc;
+
+    base::AccountingAllocator allocator;
+    bool ok = PrintAst(
+        &allocator, FunctionBodyForTesting(bytes_vec.start(), bytes_vec.end()),
+        disassembly_os, nullptr);
+    DCHECK(ok);
+    USE(ok);
+  }
+
+  // Unfortunately, we have to copy the string here.
+  std::string code_str = disassembly_os.str();
+  CHECK_LE(code_str.length(), static_cast<size_t>(kMaxInt));
+  Factory *factory = debug_info->GetIsolate()->factory();
+  Vector<const char> code_vec(code_str.data(),
+                              static_cast<int>(code_str.length()));
+  return factory->NewStringFromAscii(code_vec).ToHandleChecked();
+}
+
+Handle<FixedArray> WasmDebugInfo::GetFunctionOffsetTable(
+    Handle<WasmDebugInfo> debug_info, int func_index) {
+  class NullBuf : public std::streambuf {};
+  NullBuf null_buf;
+  std::ostream null_stream(&null_buf);
+
+  std::vector<std::tuple<uint32_t, int, int>> offset_table_vec;
+
+  {
+    Vector<const uint8_t> bytes_vec = GetFunctionBytes(debug_info, func_index);
+    DisallowHeapAllocation no_gc;
+
+    v8::base::AccountingAllocator allocator;
+    bool ok = PrintAst(
+        &allocator, FunctionBodyForTesting(bytes_vec.start(), bytes_vec.end()),
+        null_stream, &offset_table_vec);
+    DCHECK(ok);
+    USE(ok);
+  }
+
+  size_t arr_size = 3 * offset_table_vec.size();
+  CHECK_LE(arr_size, static_cast<size_t>(kMaxInt));
+  Factory *factory = debug_info->GetIsolate()->factory();
+  Handle<FixedArray> offset_table =
+      factory->NewFixedArray(static_cast<int>(arr_size), TENURED);
+
+  int idx = 0;
+  for (std::tuple<uint32_t, int, int> elem : offset_table_vec) {
+    offset_table->set(idx++, Smi::FromInt(std::get<0>(elem)));
+    offset_table->set(idx++, Smi::FromInt(std::get<1>(elem)));
+    offset_table->set(idx++, Smi::FromInt(std::get<2>(elem)));
+  }
+  DCHECK_EQ(idx, offset_table->length());
+
+  return offset_table;
+}
diff --git a/src/wasm/wasm-debug.h b/src/wasm/wasm-debug.h
new file mode 100644
index 0000000..48dc4be
--- /dev/null
+++ b/src/wasm/wasm-debug.h
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_DEBUG_H_
+#define V8_WASM_DEBUG_H_
+
+#include "src/handles.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class WasmDebugInfo : public FixedArray {
+ public:
+  static Handle<WasmDebugInfo> New(Handle<JSObject> wasm);
+
+  static bool IsDebugInfo(Object* object);
+  static WasmDebugInfo* cast(Object* object);
+
+  JSObject* wasm_object();
+
+  bool SetBreakPoint(int byte_offset);
+
+  // Disassemble the specified function from this module.
+  static Handle<String> DisassembleFunction(Handle<WasmDebugInfo> debug_info,
+                                            int func_index);
+
+  // Get the offset table for the specified function.
+  // Returns an array with three entries per instruction: byte offset, line and
+  // column.
+  static Handle<FixedArray> GetFunctionOffsetTable(
+      Handle<WasmDebugInfo> debug_info, int func_index);
+};
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_WASM_DEBUG_H_
diff --git a/src/wasm/wasm-function-name-table.cc b/src/wasm/wasm-function-name-table.cc
index f082704..32c302d 100644
--- a/src/wasm/wasm-function-name-table.cc
+++ b/src/wasm/wasm-function-name-table.cc
@@ -14,59 +14,56 @@
 // module, then the first (kIntSize * (N+1)) bytes are integer entries.
 // The first integer entry encodes the number of functions in the module.
 // The entries 1 to N contain offsets into the second part of this array.
+// If a function is unnamed (not to be confused with an empty name), then the
+// integer entry is the negative offset of the next function name.
 // After these N+1 integer entries, the second part begins, which holds a
 // concatenation of all function names.
-//
-// Returns undefined if the array length would not fit in an integer value
-Handle<Object> BuildFunctionNamesTable(Isolate* isolate, WasmModule* module) {
+Handle<ByteArray> BuildFunctionNamesTable(Isolate* isolate,
+                                          const WasmModule* module) {
   uint64_t func_names_length = 0;
   for (auto& func : module->functions) func_names_length += func.name_length;
   int num_funcs_int = static_cast<int>(module->functions.size());
   int current_offset = (num_funcs_int + 1) * kIntSize;
   uint64_t total_array_length = current_offset + func_names_length;
   int total_array_length_int = static_cast<int>(total_array_length);
-  // Check for overflow. Just skip function names if it happens.
-  if (total_array_length_int != total_array_length || num_funcs_int < 0 ||
-      num_funcs_int != module->functions.size())
-    return isolate->factory()->undefined_value();
+  // Check for overflow.
+  CHECK(total_array_length_int == total_array_length && num_funcs_int >= 0 &&
+        num_funcs_int == module->functions.size());
   Handle<ByteArray> func_names_array =
       isolate->factory()->NewByteArray(total_array_length_int, TENURED);
-  if (func_names_array.is_null()) return isolate->factory()->undefined_value();
   func_names_array->set_int(0, num_funcs_int);
   int func_index = 0;
-  for (WasmFunction& fun : module->functions) {
+  for (const WasmFunction& fun : module->functions) {
     WasmName name = module->GetNameOrNull(&fun);
-    func_names_array->copy_in(current_offset,
-                              reinterpret_cast<const byte*>(name.start()),
-                              name.length());
-    func_names_array->set_int(func_index + 1, current_offset);
-    current_offset += name.length();
+    if (name.start() == nullptr) {
+      func_names_array->set_int(func_index + 1, -current_offset);
+    } else {
+      func_names_array->copy_in(current_offset,
+                                reinterpret_cast<const byte*>(name.start()),
+                                name.length());
+      func_names_array->set_int(func_index + 1, current_offset);
+      current_offset += name.length();
+    }
     ++func_index;
   }
   return func_names_array;
 }
 
-Handle<Object> GetWasmFunctionNameFromTable(Handle<ByteArray> func_names_array,
-                                            uint32_t func_index) {
+MaybeHandle<String> GetWasmFunctionNameFromTable(
+    Handle<ByteArray> func_names_array, uint32_t func_index) {
   uint32_t num_funcs = static_cast<uint32_t>(func_names_array->get_int(0));
   DCHECK(static_cast<int>(num_funcs) >= 0);
-  auto undefined = [&func_names_array]() -> Handle<Object> {
-    return func_names_array->GetIsolate()->factory()->undefined_value();
-  };
-  if (func_index >= num_funcs) return undefined();
+  Factory* factory = func_names_array->GetIsolate()->factory();
+  if (func_index >= num_funcs) return {};
   int offset = func_names_array->get_int(func_index + 1);
+  if (offset < 0) return {};
   int next_offset = func_index == num_funcs - 1
                         ? func_names_array->length()
-                        : func_names_array->get_int(func_index + 2);
+                        : abs(func_names_array->get_int(func_index + 2));
   ScopedVector<byte> buffer(next_offset - offset);
   func_names_array->copy_out(offset, buffer.start(), next_offset - offset);
-  if (!unibrow::Utf8::Validate(buffer.start(), buffer.length())) {
-    return undefined();
-  }
-  MaybeHandle<Object> maybe_name =
-      func_names_array->GetIsolate()->factory()->NewStringFromUtf8(
-          Vector<const char>::cast(buffer));
-  return maybe_name.is_null() ? undefined() : maybe_name.ToHandleChecked();
+  if (!unibrow::Utf8::Validate(buffer.start(), buffer.length())) return {};
+  return factory->NewStringFromUtf8(Vector<const char>::cast(buffer));
 }
 
 }  // namespace wasm
diff --git a/src/wasm/wasm-function-name-table.h b/src/wasm/wasm-function-name-table.h
index 1a71372..ffee782 100644
--- a/src/wasm/wasm-function-name-table.h
+++ b/src/wasm/wasm-function-name-table.h
@@ -16,12 +16,15 @@
 struct WasmModule;
 
 // Encode all function names of the WasmModule into one ByteArray.
-Handle<Object> BuildFunctionNamesTable(Isolate* isolate, WasmModule* module);
+Handle<ByteArray> BuildFunctionNamesTable(Isolate* isolate,
+                                          const WasmModule* module);
 
-// Extract the function name for the given func_index from the wasm module.
-// Returns undefined if the function index is invalid.
-Handle<Object> GetWasmFunctionNameFromTable(Handle<ByteArray> wasm_names_table,
-                                            uint32_t func_index);
+// Extract the function name for the given func_index from the function name
+// table.
+// Returns a null handle if the respective function is unnamed (not to be
+// confused with empty names) or the function name is not a valid UTF-8 string.
+MaybeHandle<String> GetWasmFunctionNameFromTable(
+    Handle<ByteArray> wasm_names_table, uint32_t func_index);
 
 }  // namespace wasm
 }  // namespace internal
diff --git a/src/wasm/wasm-interpreter.cc b/src/wasm/wasm-interpreter.cc
new file mode 100644
index 0000000..a88fa93
--- /dev/null
+++ b/src/wasm/wasm-interpreter.cc
@@ -0,0 +1,1830 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-interpreter.h"
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/decoder.h"
+#include "src/wasm/wasm-external-refs.h"
+#include "src/wasm/wasm-module.h"
+
+#include "src/base/accounting-allocator.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#if DEBUG
+#define TRACE(...)                                        \
+  do {                                                    \
+    if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \
+  } while (false)
+#else
+#define TRACE(...)
+#endif
+
+#define FOREACH_INTERNAL_OPCODE(V) V(Breakpoint, 0xFF)
+
+#define FOREACH_SIMPLE_BINOP(V) \
+  V(I32Add, uint32_t, +)        \
+  V(I32Sub, uint32_t, -)        \
+  V(I32Mul, uint32_t, *)        \
+  V(I32And, uint32_t, &)        \
+  V(I32Ior, uint32_t, |)        \
+  V(I32Xor, uint32_t, ^)        \
+  V(I32Eq, uint32_t, ==)        \
+  V(I32Ne, uint32_t, !=)        \
+  V(I32LtU, uint32_t, <)        \
+  V(I32LeU, uint32_t, <=)       \
+  V(I32GtU, uint32_t, >)        \
+  V(I32GeU, uint32_t, >=)       \
+  V(I32LtS, int32_t, <)         \
+  V(I32LeS, int32_t, <=)        \
+  V(I32GtS, int32_t, >)         \
+  V(I32GeS, int32_t, >=)        \
+  V(I64Add, uint64_t, +)        \
+  V(I64Sub, uint64_t, -)        \
+  V(I64Mul, uint64_t, *)        \
+  V(I64And, uint64_t, &)        \
+  V(I64Ior, uint64_t, |)        \
+  V(I64Xor, uint64_t, ^)        \
+  V(I64Eq, uint64_t, ==)        \
+  V(I64Ne, uint64_t, !=)        \
+  V(I64LtU, uint64_t, <)        \
+  V(I64LeU, uint64_t, <=)       \
+  V(I64GtU, uint64_t, >)        \
+  V(I64GeU, uint64_t, >=)       \
+  V(I64LtS, int64_t, <)         \
+  V(I64LeS, int64_t, <=)        \
+  V(I64GtS, int64_t, >)         \
+  V(I64GeS, int64_t, >=)        \
+  V(F32Add, float, +)           \
+  V(F32Mul, float, *)           \
+  V(F32Div, float, /)           \
+  V(F32Eq, float, ==)           \
+  V(F32Ne, float, !=)           \
+  V(F32Lt, float, <)            \
+  V(F32Le, float, <=)           \
+  V(F32Gt, float, >)            \
+  V(F32Ge, float, >=)           \
+  V(F64Add, double, +)          \
+  V(F64Mul, double, *)          \
+  V(F64Div, double, /)          \
+  V(F64Eq, double, ==)          \
+  V(F64Ne, double, !=)          \
+  V(F64Lt, double, <)           \
+  V(F64Le, double, <=)          \
+  V(F64Gt, double, >)           \
+  V(F64Ge, double, >=)
+
+#define FOREACH_OTHER_BINOP(V) \
+  V(I32DivS, int32_t)          \
+  V(I32DivU, uint32_t)         \
+  V(I32RemS, int32_t)          \
+  V(I32RemU, uint32_t)         \
+  V(I32Shl, uint32_t)          \
+  V(I32ShrU, uint32_t)         \
+  V(I32ShrS, int32_t)          \
+  V(I64DivS, int64_t)          \
+  V(I64DivU, uint64_t)         \
+  V(I64RemS, int64_t)          \
+  V(I64RemU, uint64_t)         \
+  V(I64Shl, uint64_t)          \
+  V(I64ShrU, uint64_t)         \
+  V(I64ShrS, int64_t)          \
+  V(I32Ror, int32_t)           \
+  V(I32Rol, int32_t)           \
+  V(I64Ror, int64_t)           \
+  V(I64Rol, int64_t)           \
+  V(F32Sub, float)             \
+  V(F32Min, float)             \
+  V(F32Max, float)             \
+  V(F32CopySign, float)        \
+  V(F64Min, double)            \
+  V(F64Max, double)            \
+  V(F64Sub, double)            \
+  V(F64CopySign, double)       \
+  V(I32AsmjsDivS, int32_t)     \
+  V(I32AsmjsDivU, uint32_t)    \
+  V(I32AsmjsRemS, int32_t)     \
+  V(I32AsmjsRemU, uint32_t)
+
+#define FOREACH_OTHER_UNOP(V)    \
+  V(I32Clz, uint32_t)            \
+  V(I32Ctz, uint32_t)            \
+  V(I32Popcnt, uint32_t)         \
+  V(I32Eqz, uint32_t)            \
+  V(I64Clz, uint64_t)            \
+  V(I64Ctz, uint64_t)            \
+  V(I64Popcnt, uint64_t)         \
+  V(I64Eqz, uint64_t)            \
+  V(F32Abs, float)               \
+  V(F32Neg, float)               \
+  V(F32Ceil, float)              \
+  V(F32Floor, float)             \
+  V(F32Trunc, float)             \
+  V(F32NearestInt, float)        \
+  V(F32Sqrt, float)              \
+  V(F64Abs, double)              \
+  V(F64Neg, double)              \
+  V(F64Ceil, double)             \
+  V(F64Floor, double)            \
+  V(F64Trunc, double)            \
+  V(F64NearestInt, double)       \
+  V(F64Sqrt, double)             \
+  V(I32SConvertF32, float)       \
+  V(I32SConvertF64, double)      \
+  V(I32UConvertF32, float)       \
+  V(I32UConvertF64, double)      \
+  V(I32ConvertI64, int64_t)      \
+  V(I64SConvertF32, float)       \
+  V(I64SConvertF64, double)      \
+  V(I64UConvertF32, float)       \
+  V(I64UConvertF64, double)      \
+  V(I64SConvertI32, int32_t)     \
+  V(I64UConvertI32, uint32_t)    \
+  V(F32SConvertI32, int32_t)     \
+  V(F32UConvertI32, uint32_t)    \
+  V(F32SConvertI64, int64_t)     \
+  V(F32UConvertI64, uint64_t)    \
+  V(F32ConvertF64, double)       \
+  V(F32ReinterpretI32, int32_t)  \
+  V(F64SConvertI32, int32_t)     \
+  V(F64UConvertI32, uint32_t)    \
+  V(F64SConvertI64, int64_t)     \
+  V(F64UConvertI64, uint64_t)    \
+  V(F64ConvertF32, float)        \
+  V(F64ReinterpretI64, int64_t)  \
+  V(I32ReinterpretF32, float)    \
+  V(I64ReinterpretF64, double)   \
+  V(I32AsmjsSConvertF32, float)  \
+  V(I32AsmjsUConvertF32, float)  \
+  V(I32AsmjsSConvertF64, double) \
+  V(I32AsmjsUConvertF64, double)
+
+static inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
+  if (b == 0) {
+    *trap = kTrapDivByZero;
+    return 0;
+  }
+  if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
+    *trap = kTrapDivUnrepresentable;
+    return 0;
+  }
+  return a / b;
+}
+
+static inline uint32_t ExecuteI32DivU(uint32_t a, uint32_t b,
+                                      TrapReason* trap) {
+  if (b == 0) {
+    *trap = kTrapDivByZero;
+    return 0;
+  }
+  return a / b;
+}
+
+static inline int32_t ExecuteI32RemS(int32_t a, int32_t b, TrapReason* trap) {
+  if (b == 0) {
+    *trap = kTrapRemByZero;
+    return 0;
+  }
+  if (b == -1) return 0;
+  return a % b;
+}
+
+static inline uint32_t ExecuteI32RemU(uint32_t a, uint32_t b,
+                                      TrapReason* trap) {
+  if (b == 0) {
+    *trap = kTrapRemByZero;
+    return 0;
+  }
+  return a % b;
+}
+
+static inline uint32_t ExecuteI32Shl(uint32_t a, uint32_t b, TrapReason* trap) {
+  return a << (b & 0x1f);
+}
+
+static inline uint32_t ExecuteI32ShrU(uint32_t a, uint32_t b,
+                                      TrapReason* trap) {
+  return a >> (b & 0x1f);
+}
+
+static inline int32_t ExecuteI32ShrS(int32_t a, int32_t b, TrapReason* trap) {
+  return a >> (b & 0x1f);
+}
+
+static inline int64_t ExecuteI64DivS(int64_t a, int64_t b, TrapReason* trap) {
+  if (b == 0) {
+    *trap = kTrapDivByZero;
+    return 0;
+  }
+  if (b == -1 && a == std::numeric_limits<int64_t>::min()) {
+    *trap = kTrapDivUnrepresentable;
+    return 0;
+  }
+  return a / b;
+}
+
+static inline uint64_t ExecuteI64DivU(uint64_t a, uint64_t b,
+                                      TrapReason* trap) {
+  if (b == 0) {
+    *trap = kTrapDivByZero;
+    return 0;
+  }
+  return a / b;
+}
+
+static inline int64_t ExecuteI64RemS(int64_t a, int64_t b, TrapReason* trap) {
+  if (b == 0) {
+    *trap = kTrapRemByZero;
+    return 0;
+  }
+  if (b == -1) return 0;
+  return a % b;
+}
+
+static inline uint64_t ExecuteI64RemU(uint64_t a, uint64_t b,
+                                      TrapReason* trap) {
+  if (b == 0) {
+    *trap = kTrapRemByZero;
+    return 0;
+  }
+  return a % b;
+}
+
+static inline uint64_t ExecuteI64Shl(uint64_t a, uint64_t b, TrapReason* trap) {
+  return a << (b & 0x3f);
+}
+
+static inline uint64_t ExecuteI64ShrU(uint64_t a, uint64_t b,
+                                      TrapReason* trap) {
+  return a >> (b & 0x3f);
+}
+
+static inline int64_t ExecuteI64ShrS(int64_t a, int64_t b, TrapReason* trap) {
+  return a >> (b & 0x3f);
+}
+
+static inline uint32_t ExecuteI32Ror(uint32_t a, uint32_t b, TrapReason* trap) {
+  uint32_t shift = (b & 0x1f);
+  return (a >> shift) | (a << (32 - shift));
+}
+
+static inline uint32_t ExecuteI32Rol(uint32_t a, uint32_t b, TrapReason* trap) {
+  uint32_t shift = (b & 0x1f);
+  return (a << shift) | (a >> (32 - shift));
+}
+
+static inline uint64_t ExecuteI64Ror(uint64_t a, uint64_t b, TrapReason* trap) {
+  uint32_t shift = (b & 0x3f);
+  return (a >> shift) | (a << (64 - shift));
+}
+
+static inline uint64_t ExecuteI64Rol(uint64_t a, uint64_t b, TrapReason* trap) {
+  uint32_t shift = (b & 0x3f);
+  return (a << shift) | (a >> (64 - shift));
+}
+
+static float quiet(float a) {
+  static const uint32_t kSignalingBit = 1 << 22;
+  uint32_t q = bit_cast<uint32_t>(std::numeric_limits<float>::quiet_NaN());
+  if ((q & kSignalingBit) != 0) {
+    // On some machines, the signaling bit set indicates it's a quiet NaN.
+    return bit_cast<float>(bit_cast<uint32_t>(a) | kSignalingBit);
+  } else {
+    // On others, the signaling bit set indicates it's a signaling NaN.
+    return bit_cast<float>(bit_cast<uint32_t>(a) & ~kSignalingBit);
+  }
+}
+
+static double quiet(double a) {
+  static const uint64_t kSignalingBit = 1ULL << 51;
+  uint64_t q = bit_cast<uint64_t>(std::numeric_limits<double>::quiet_NaN());
+  if ((q & kSignalingBit) != 0) {
+    // On some machines, the signaling bit set indicates it's a quiet NaN.
+    return bit_cast<double>(bit_cast<uint64_t>(a) | kSignalingBit);
+  } else {
+    // On others, the signaling bit set indicates it's a signaling NaN.
+    return bit_cast<double>(bit_cast<uint64_t>(a) & ~kSignalingBit);
+  }
+}
+
+static inline float ExecuteF32Sub(float a, float b, TrapReason* trap) {
+  float result = a - b;
+  // Some architectures (e.g. MIPS) need extra checking to preserve the payload
+  // of a NaN operand.
+  if (result - result != 0) {
+    if (std::isnan(a)) return quiet(a);
+    if (std::isnan(b)) return quiet(b);
+  }
+  return result;
+}
+
+static inline float ExecuteF32Min(float a, float b, TrapReason* trap) {
+  if (std::isnan(a)) return quiet(a);
+  if (std::isnan(b)) return quiet(b);
+  return std::min(a, b);
+}
+
+static inline float ExecuteF32Max(float a, float b, TrapReason* trap) {
+  if (std::isnan(a)) return quiet(a);
+  if (std::isnan(b)) return quiet(b);
+  return std::max(a, b);
+}
+
+static inline float ExecuteF32CopySign(float a, float b, TrapReason* trap) {
+  return copysignf(a, b);
+}
+
+static inline double ExecuteF64Sub(double a, double b, TrapReason* trap) {
+  double result = a - b;
+  // Some architectures (e.g. MIPS) need extra checking to preserve the payload
+  // of a NaN operand.
+  if (result - result != 0) {
+    if (std::isnan(a)) return quiet(a);
+    if (std::isnan(b)) return quiet(b);
+  }
+  return result;
+}
+
+static inline double ExecuteF64Min(double a, double b, TrapReason* trap) {
+  if (std::isnan(a)) return quiet(a);
+  if (std::isnan(b)) return quiet(b);
+  return std::min(a, b);
+}
+
+static inline double ExecuteF64Max(double a, double b, TrapReason* trap) {
+  if (std::isnan(a)) return quiet(a);
+  if (std::isnan(b)) return quiet(b);
+  return std::max(a, b);
+}
+
+static inline double ExecuteF64CopySign(double a, double b, TrapReason* trap) {
+  return copysign(a, b);
+}
+
+static inline int32_t ExecuteI32AsmjsDivS(int32_t a, int32_t b,
+                                          TrapReason* trap) {
+  if (b == 0) return 0;
+  if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
+    return std::numeric_limits<int32_t>::min();
+  }
+  return a / b;
+}
+
+static inline uint32_t ExecuteI32AsmjsDivU(uint32_t a, uint32_t b,
+                                           TrapReason* trap) {
+  if (b == 0) return 0;
+  return a / b;
+}
+
+static inline int32_t ExecuteI32AsmjsRemS(int32_t a, int32_t b,
+                                          TrapReason* trap) {
+  if (b == 0) return 0;
+  if (b == -1) return 0;
+  return a % b;
+}
+
+static inline uint32_t ExecuteI32AsmjsRemU(uint32_t a, uint32_t b,
+                                           TrapReason* trap) {
+  if (b == 0) return 0;
+  return a % b;
+}
+
+static inline int32_t ExecuteI32AsmjsSConvertF32(float a, TrapReason* trap) {
+  return DoubleToInt32(a);
+}
+
+static inline uint32_t ExecuteI32AsmjsUConvertF32(float a, TrapReason* trap) {
+  return DoubleToUint32(a);
+}
+
+static inline int32_t ExecuteI32AsmjsSConvertF64(double a, TrapReason* trap) {
+  return DoubleToInt32(a);
+}
+
+static inline uint32_t ExecuteI32AsmjsUConvertF64(double a, TrapReason* trap) {
+  return DoubleToUint32(a);
+}
+
// Bit-counting and zero-test operations; none of these can trap, so {trap}
// is unused.

static int32_t ExecuteI32Clz(uint32_t val, TrapReason* trap) {
  return base::bits::CountLeadingZeros32(val);
}

static uint32_t ExecuteI32Ctz(uint32_t val, TrapReason* trap) {
  return base::bits::CountTrailingZeros32(val);
}

// Population count uses the out-of-line helper from wasm-external-refs.
static uint32_t ExecuteI32Popcnt(uint32_t val, TrapReason* trap) {
  return word32_popcnt_wrapper(&val);
}

// i32.eqz: 1 if the operand is zero, 0 otherwise.
static inline uint32_t ExecuteI32Eqz(uint32_t val, TrapReason* trap) {
  return val == 0 ? 1 : 0;
}

static int64_t ExecuteI64Clz(uint64_t val, TrapReason* trap) {
  return base::bits::CountLeadingZeros64(val);
}

static inline uint64_t ExecuteI64Ctz(uint64_t val, TrapReason* trap) {
  return base::bits::CountTrailingZeros64(val);
}

// Population count uses the out-of-line helper from wasm-external-refs.
static inline int64_t ExecuteI64Popcnt(uint64_t val, TrapReason* trap) {
  return word64_popcnt_wrapper(&val);
}

// i64.eqz produces an i32 result: 1 if the operand is zero, 0 otherwise.
static inline int32_t ExecuteI64Eqz(uint64_t val, TrapReason* trap) {
  return val == 0 ? 1 : 0;
}
+
// abs/neg are implemented as integer bit operations (clearing/flipping the
// sign bit) so they are exact for NaNs and signed zeros.

static inline float ExecuteF32Abs(float a, TrapReason* trap) {
  return bit_cast<float>(bit_cast<uint32_t>(a) & 0x7fffffff);
}

static inline float ExecuteF32Neg(float a, TrapReason* trap) {
  return bit_cast<float>(bit_cast<uint32_t>(a) ^ 0x80000000);
}

// The rounding and sqrt unops delegate to the C math library; none of them
// can trap.
static inline float ExecuteF32Ceil(float a, TrapReason* trap) {
  return ceilf(a);
}

static inline float ExecuteF32Floor(float a, TrapReason* trap) {
  return floorf(a);
}

static inline float ExecuteF32Trunc(float a, TrapReason* trap) {
  return truncf(a);
}

static inline float ExecuteF32NearestInt(float a, TrapReason* trap) {
  return nearbyintf(a);
}

static inline float ExecuteF32Sqrt(float a, TrapReason* trap) {
  return sqrtf(a);
}

// Double-precision counterparts of the helpers above.
static inline double ExecuteF64Abs(double a, TrapReason* trap) {
  return bit_cast<double>(bit_cast<uint64_t>(a) & 0x7fffffffffffffff);
}

static inline double ExecuteF64Neg(double a, TrapReason* trap) {
  return bit_cast<double>(bit_cast<uint64_t>(a) ^ 0x8000000000000000);
}

static inline double ExecuteF64Ceil(double a, TrapReason* trap) {
  return ceil(a);
}

static inline double ExecuteF64Floor(double a, TrapReason* trap) {
  return floor(a);
}

static inline double ExecuteF64Trunc(double a, TrapReason* trap) {
  return trunc(a);
}

static inline double ExecuteF64NearestInt(double a, TrapReason* trap) {
  return nearbyint(a);
}

static inline double ExecuteF64Sqrt(double a, TrapReason* trap) {
  return sqrt(a);
}
+
+static int32_t ExecuteI32SConvertF32(float a, TrapReason* trap) {
+  if (a < static_cast<float>(INT32_MAX) && a >= static_cast<float>(INT32_MIN)) {
+    return static_cast<int32_t>(a);
+  }
+  *trap = kTrapFloatUnrepresentable;
+  return 0;
+}
+
+static int32_t ExecuteI32SConvertF64(double a, TrapReason* trap) {
+  if (a < (static_cast<double>(INT32_MAX) + 1.0) &&
+      a > (static_cast<double>(INT32_MIN) - 1.0)) {
+    return static_cast<int32_t>(a);
+  }
+  *trap = kTrapFloatUnrepresentable;
+  return 0;
+}
+
+static uint32_t ExecuteI32UConvertF32(float a, TrapReason* trap) {
+  if (a < (static_cast<float>(UINT32_MAX) + 1.0) && a > -1) {
+    return static_cast<uint32_t>(a);
+  }
+  *trap = kTrapFloatUnrepresentable;
+  return 0;
+}
+
+static uint32_t ExecuteI32UConvertF64(double a, TrapReason* trap) {
+  if (a < (static_cast<float>(UINT32_MAX) + 1.0) && a > -1) {
+    return static_cast<uint32_t>(a);
+  }
+  *trap = kTrapFloatUnrepresentable;
+  return 0;
+}
+
+static inline uint32_t ExecuteI32ConvertI64(int64_t a, TrapReason* trap) {
+  return static_cast<uint32_t>(a & 0xFFFFFFFF);
+}
+
+static int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
+  int64_t output;
+  if (!float32_to_int64_wrapper(&a, &output)) {
+    *trap = kTrapFloatUnrepresentable;
+  }
+  return output;
+}
+
+static int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
+  int64_t output;
+  if (!float64_to_int64_wrapper(&a, &output)) {
+    *trap = kTrapFloatUnrepresentable;
+  }
+  return output;
+}
+
+static uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
+  uint64_t output;
+  if (!float32_to_uint64_wrapper(&a, &output)) {
+    *trap = kTrapFloatUnrepresentable;
+  }
+  return output;
+}
+
+static uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
+  uint64_t output;
+  if (!float64_to_uint64_wrapper(&a, &output)) {
+    *trap = kTrapFloatUnrepresentable;
+  }
+  return output;
+}
+
+static inline int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) {
+  return static_cast<int64_t>(a);
+}
+
+static inline int64_t ExecuteI64UConvertI32(uint32_t a, TrapReason* trap) {
+  return static_cast<uint64_t>(a);
+}
+
// Integer -> float conversions; these cannot trap (out-of-precision values
// are rounded by the cast).

static inline float ExecuteF32SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<float>(a);
}

static inline float ExecuteF32UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<float>(a);
}

// 64-bit integer -> float32 conversions go through out-of-line helpers
// (see wasm-external-refs).
static inline float ExecuteF32SConvertI64(int64_t a, TrapReason* trap) {
  float output;
  int64_to_float32_wrapper(&a, &output);
  return output;
}

static inline float ExecuteF32UConvertI64(uint64_t a, TrapReason* trap) {
  float output;
  uint64_to_float32_wrapper(&a, &output);
  return output;
}

// Precision demotion double -> float.
static inline float ExecuteF32ConvertF64(double a, TrapReason* trap) {
  return static_cast<float>(a);
}

// Reinterpretations copy the bit pattern unchanged between integer and
// floating-point representations.
static inline float ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
  return bit_cast<float>(a);
}

static inline double ExecuteF64SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<double>(a);
}

static inline double ExecuteF64UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<double>(a);
}

// 64-bit integer -> float64 conversions go through out-of-line helpers
// (see wasm-external-refs).
static inline double ExecuteF64SConvertI64(int64_t a, TrapReason* trap) {
  double output;
  int64_to_float64_wrapper(&a, &output);
  return output;
}

static inline double ExecuteF64UConvertI64(uint64_t a, TrapReason* trap) {
  double output;
  uint64_to_float64_wrapper(&a, &output);
  return output;
}

// Precision promotion float -> double.
static inline double ExecuteF64ConvertF32(float a, TrapReason* trap) {
  return static_cast<double>(a);
}

static inline double ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) {
  return bit_cast<double>(a);
}

static inline int32_t ExecuteI32ReinterpretF32(float a, TrapReason* trap) {
  return bit_cast<int32_t>(a);
}

static inline int64_t ExecuteI64ReinterpretF64(double a, TrapReason* trap) {
  return bit_cast<int64_t>(a);
}
+
// Interpreter-internal opcodes occupy encodings unused by wasm proper;
// currently there is only the breakpoint marker (0xFF).
enum InternalOpcode {
#define DECL_INTERNAL_ENUM(name, value) kInternal##name = value,
  FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_ENUM)
#undef DECL_INTERNAL_ENUM
};

// Returns a printable name for {val}, handling both internal opcodes and
// regular wasm opcodes.
static const char* OpcodeName(uint32_t val) {
  switch (val) {
#define DECL_INTERNAL_CASE(name, value) \
  case kInternal##name:                 \
    return "Internal" #name;
    FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_CASE)
#undef DECL_INTERNAL_CASE
  }
  return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(val));
}
+
+static const int kRunSteps = 1000;
+
+// A helper class to compute the control transfers for each bytecode offset.
+// Control transfers allow Br, BrIf, BrTable, If, Else, and End bytecodes to
+// be directly executed without the need to dynamically track blocks.
class ControlTransfers : public ZoneObject {
 public:
  // Maps the code offset of each control-flow bytecode to its precomputed
  // transfer (pc delta, stack delta, and stack fixup action).
  ControlTransferMap map_;

  ControlTransfers(Zone* zone, size_t locals_encoded_size, const byte* start,
                   const byte* end)
      : map_(zone) {
    // A control reference including from PC, from value depth, and whether
    // a value is explicitly passed (e.g. br/br_if/br_table with value).
    struct CRef {
      const byte* pc;
      sp_t value_depth;
      bool explicit_value;
    };

    // Represents a control flow label.
    struct CLabel : public ZoneObject {
      const byte* target;     // jump target; nullptr while still unbound
      size_t value_depth;     // value stack depth expected at the label
      ZoneVector<CRef> refs;  // forward references, resolved on Bind()

      CLabel(Zone* zone, size_t v)
          : target(nullptr), value_depth(v), refs(zone) {}

      // Bind this label to the given PC.
      // Resolves all forward references collected so far, recording a control
      // transfer for each in {map}.
      void Bind(ControlTransferMap* map, const byte* start, const byte* pc,
                bool expect_value) {
        DCHECK_NULL(target);
        target = pc;
        for (auto from : refs) {
          auto pcdiff = static_cast<pcdiff_t>(target - from.pc);
          auto spdiff = static_cast<spdiff_t>(from.value_depth - value_depth);
          ControlTransfer::StackAction action = ControlTransfer::kNoAction;
          // If the target expects a value but the branch does not carry one
          // explicitly, the interpreter has to synthesize it.
          if (expect_value && !from.explicit_value) {
            action = spdiff == 0 ? ControlTransfer::kPushVoid
                                 : ControlTransfer::kPopAndRepush;
          }
          pc_t offset = static_cast<size_t>(from.pc - start);
          (*map)[offset] = {pcdiff, spdiff, action};
        }
      }

      // Reference this label from the given location.
      // For an already-bound (backward) label the transfer is recorded
      // immediately; otherwise the reference is queued until Bind().
      void Ref(ControlTransferMap* map, const byte* start, CRef from) {
        DCHECK_GE(from.value_depth, value_depth);
        if (target) {
          auto pcdiff = static_cast<pcdiff_t>(target - from.pc);
          auto spdiff = static_cast<spdiff_t>(from.value_depth - value_depth);
          pc_t offset = static_cast<size_t>(from.pc - start);
          (*map)[offset] = {pcdiff, spdiff, ControlTransfer::kNoAction};
        } else {
          refs.push_back(from);
        }
      }
    };

    // An entry in the control stack.
    struct Control {
      const byte* pc;      // the bytecode that opened this construct
      CLabel* end_label;   // label at the matching end
      CLabel* else_label;  // non-null only for an if whose else was not seen

      void Ref(ControlTransferMap* map, const byte* start, const byte* from_pc,
               size_t from_value_depth, bool explicit_value) {
        end_label->Ref(map, start, {from_pc, from_value_depth, explicit_value});
      }
    };

    // Compute the ControlTransfer map.
    // This works by maintaining a stack of control constructs similar to the
    // AST decoder. The {control_stack} allows matching {br,br_if,br_table}
    // bytecodes with their target, as well as determining whether the current
    // bytecodes are within the true or false block of an else.
    // The value stack depth is tracked as {value_depth} and is needed to
    // determine how many values to pop off the stack for explicit and
    // implicit control flow.

    std::vector<Control> control_stack;
    size_t value_depth = 0;
    Decoder decoder(start, end);  // for reading operands.
    const byte* pc = start + locals_encoded_size;

    while (pc < end) {
      WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
      TRACE("@%td: control %s (depth = %zu)\n", (pc - start),
            WasmOpcodes::OpcodeName(opcode), value_depth);
      switch (opcode) {
        case kExprBlock: {
          TRACE("control @%td $%zu: Block\n", (pc - start), value_depth);
          CLabel* label = new (zone) CLabel(zone, value_depth);
          control_stack.push_back({pc, label, nullptr});
          break;
        }
        case kExprLoop: {
          TRACE("control @%td $%zu: Loop\n", (pc - start), value_depth);
          // label1 is the forward (end) target; label2 is the loop header and
          // is bound immediately since backward branches jump to it.
          CLabel* label1 = new (zone) CLabel(zone, value_depth);
          CLabel* label2 = new (zone) CLabel(zone, value_depth);
          control_stack.push_back({pc, label1, nullptr});
          control_stack.push_back({pc, label2, nullptr});
          label2->Bind(&map_, start, pc, false);
          break;
        }
        case kExprIf: {
          TRACE("control @%td $%zu: If\n", (pc - start), value_depth);
          // The if consumes the condition value.
          value_depth--;
          CLabel* end_label = new (zone) CLabel(zone, value_depth);
          CLabel* else_label = new (zone) CLabel(zone, value_depth);
          control_stack.push_back({pc, end_label, else_label});
          else_label->Ref(&map_, start, {pc, value_depth, false});
          break;
        }
        case kExprElse: {
          Control* c = &control_stack.back();
          TRACE("control @%td $%zu: Else\n", (pc - start), value_depth);
          // Falling off the true block jumps to the end label.
          c->end_label->Ref(&map_, start, {pc, value_depth, false});
          value_depth = c->end_label->value_depth;
          DCHECK_NOT_NULL(c->else_label);
          c->else_label->Bind(&map_, start, pc + 1, false);
          c->else_label = nullptr;
          break;
        }
        case kExprEnd: {
          Control* c = &control_stack.back();
          TRACE("control @%td $%zu: End\n", (pc - start), value_depth);
          if (c->end_label->target) {
            // only loops have bound labels.
            DCHECK_EQ(kExprLoop, *c->pc);
            control_stack.pop_back();
            c = &control_stack.back();
          }
          // An if without an else binds the else label here too.
          if (c->else_label) c->else_label->Bind(&map_, start, pc + 1, true);
          c->end_label->Ref(&map_, start, {pc, value_depth, false});
          c->end_label->Bind(&map_, start, pc + 1, true);
          // Every construct leaves exactly one value on the stack.
          value_depth = c->end_label->value_depth + 1;
          control_stack.pop_back();
          break;
        }
        case kExprBr: {
          BreakDepthOperand operand(&decoder, pc);
          TRACE("control @%td $%zu: Br[arity=%u, depth=%u]\n", (pc - start),
                value_depth, operand.arity, operand.depth);
          value_depth -= operand.arity;
          control_stack[control_stack.size() - operand.depth - 1].Ref(
              &map_, start, pc, value_depth, operand.arity > 0);
          value_depth++;
          break;
        }
        case kExprBrIf: {
          BreakDepthOperand operand(&decoder, pc);
          TRACE("control @%td $%zu: BrIf[arity=%u, depth=%u]\n", (pc - start),
                value_depth, operand.arity, operand.depth);
          // br_if also consumes its condition value.
          value_depth -= (operand.arity + 1);
          control_stack[control_stack.size() - operand.depth - 1].Ref(
              &map_, start, pc, value_depth, operand.arity > 0);
          value_depth++;
          break;
        }
        case kExprBrTable: {
          BranchTableOperand operand(&decoder, pc);
          TRACE("control @%td $%zu: BrTable[arity=%u count=%u]\n", (pc - start),
                value_depth, operand.arity, operand.table_count);
          // br_table also consumes its index value.
          value_depth -= (operand.arity + 1);
          // Each table entry (plus the default) gets its own map key (pc + i).
          for (uint32_t i = 0; i < operand.table_count + 1; ++i) {
            uint32_t target = operand.read_entry(&decoder, i);
            control_stack[control_stack.size() - target - 1].Ref(
                &map_, start, pc + i, value_depth, operand.arity > 0);
          }
          value_depth++;
          break;
        }
        default: {
          // A regular opcode: pops its operands and pushes one result.
          value_depth = value_depth - OpcodeArity(pc, end) + 1;
          break;
        }
      }

      pc += OpcodeLength(pc, end);
    }
  }

  // Returns the precomputed transfer for the control-flow bytecode at {from};
  // aborts if none was computed (i.e. {from} is not a control bytecode).
  ControlTransfer Lookup(pc_t from) {
    auto result = map_.find(from);
    if (result == map_.end()) {
      V8_Fatal(__FILE__, __LINE__, "no control target for pc %zu", from);
    }
    return result->second;
  }
};
+
// Code and metadata needed to execute a function.
struct InterpreterCode {
  const WasmFunction* function;  // wasm function
  AstLocalDecls locals;          // local declarations
  const byte* orig_start;        // start of original code
  const byte* orig_end;          // end of original code
  byte* start;                   // start of (maybe altered) code
  byte* end;                     // end of (maybe altered) code
  ControlTransfers* targets;     // helper for control flow.

  // Translates a code offset (relative to {start}) into an absolute pointer.
  const byte* at(pc_t pc) { return start + pc; }
};
+
+// The main storage for interpreter code. It maps {WasmFunction} to the
+// metadata needed to execute each function.
class CodeMap {
 public:
  Zone* zone_;
  const WasmModule* module_;
  // Indexed by function index; entries are registered in the constructor and
  // preprocessed lazily.
  ZoneVector<InterpreterCode> interpreter_code_;

  CodeMap(const WasmModule* module, Zone* zone)
      : zone_(zone), module_(module), interpreter_code_(zone) {
    if (module == nullptr) return;
    for (size_t i = 0; i < module->functions.size(); ++i) {
      const WasmFunction* function = &module->functions[i];
      const byte* code_start =
          module->module_start + function->code_start_offset;
      const byte* code_end = module->module_start + function->code_end_offset;
      AddFunction(function, code_start, code_end);
    }
  }

  // Returns the interpreter code for {function}, or nullptr if it was never
  // added. Does not trigger preprocessing.
  InterpreterCode* FindCode(const WasmFunction* function) {
    if (function->func_index < interpreter_code_.size()) {
      InterpreterCode* code = &interpreter_code_[function->func_index];
      DCHECK_EQ(function, code->function);
      return code;
    }
    return nullptr;
  }

  // Returns the (lazily preprocessed) code for the given function index;
  // the index is bounds-checked.
  InterpreterCode* GetCode(uint32_t function_index) {
    CHECK_LT(function_index, interpreter_code_.size());
    return Preprocess(&interpreter_code_[function_index]);
  }

  // Resolves an indirect call through the module's function table; returns
  // nullptr for out-of-bounds table or function indices.
  InterpreterCode* GetIndirectCode(uint32_t indirect_index) {
    if (indirect_index >= module_->function_table.size()) return nullptr;
    uint32_t index = module_->function_table[indirect_index];
    if (index >= interpreter_code_.size()) return nullptr;
    return GetCode(index);
  }

  // Lazily decodes the local declarations and computes the control transfer
  // map for {code}; idempotent once {targets} is set.
  InterpreterCode* Preprocess(InterpreterCode* code) {
    if (code->targets == nullptr && code->start) {
      // Compute the control targets map and the local declarations.
      CHECK(DecodeLocalDecls(code->locals, code->start, code->end));
      code->targets =
          new (zone_) ControlTransfers(zone_, code->locals.decls_encoded_size,
                                       code->orig_start, code->orig_end);
    }
    return code;
  }

  // Registers the bytecode of {function}; its slot must match the function
  // index. Returns the new entry's index.
  int AddFunction(const WasmFunction* function, const byte* code_start,
                  const byte* code_end) {
    InterpreterCode code = {
        function, AstLocalDecls(zone_),          code_start,
        code_end, const_cast<byte*>(code_start), const_cast<byte*>(code_end),
        nullptr};

    DCHECK_EQ(interpreter_code_.size(), function->func_index);
    interpreter_code_.push_back(code);
    return static_cast<int>(interpreter_code_.size()) - 1;
  }

  // Replaces the bytecode of an already-registered function and recomputes
  // its metadata. Returns false if the function is unknown.
  bool SetFunctionCode(const WasmFunction* function, const byte* start,
                       const byte* end) {
    InterpreterCode* code = FindCode(function);
    if (code == nullptr) return false;
    code->targets = nullptr;
    code->orig_start = start;
    code->orig_end = end;
    code->start = const_cast<byte*>(start);
    code->end = const_cast<byte*>(end);
    Preprocess(code);
    return true;
  }
};
+
+// Responsible for executing code directly.
+class ThreadImpl : public WasmInterpreter::Thread {
+ public:
+  ThreadImpl(Zone* zone, CodeMap* codemap, WasmModuleInstance* instance)
+      : codemap_(codemap),
+        instance_(instance),
+        stack_(zone),
+        frames_(zone),
+        state_(WasmInterpreter::STOPPED),
+        break_pc_(kInvalidPc),
+        trap_reason_(kTrapCount) {}
+
+  virtual ~ThreadImpl() {}
+
+  //==========================================================================
+  // Implementation of public interface for WasmInterpreter::Thread.
+  //==========================================================================
+
+  virtual WasmInterpreter::State state() { return state_; }
+
+  virtual void PushFrame(const WasmFunction* function, WasmVal* args) {
+    InterpreterCode* code = codemap()->FindCode(function);
+    CHECK_NOT_NULL(code);
+    frames_.push_back({code, 0, 0, stack_.size()});
+    for (size_t i = 0; i < function->sig->parameter_count(); ++i) {
+      stack_.push_back(args[i]);
+    }
+    frames_.back().ret_pc = InitLocals(code);
+    TRACE("  => PushFrame(#%u @%zu)\n", code->function->func_index,
+          frames_.back().ret_pc);
+  }
+
+  virtual WasmInterpreter::State Run() {
+    do {
+      TRACE("  => Run()\n");
+      if (state_ == WasmInterpreter::STOPPED ||
+          state_ == WasmInterpreter::PAUSED) {
+        state_ = WasmInterpreter::RUNNING;
+        Execute(frames_.back().code, frames_.back().ret_pc, kRunSteps);
+      }
+    } while (state_ == WasmInterpreter::STOPPED);
+    return state_;
+  }
+
+  virtual WasmInterpreter::State Step() {
+    TRACE("  => Step()\n");
+    if (state_ == WasmInterpreter::STOPPED ||
+        state_ == WasmInterpreter::PAUSED) {
+      state_ = WasmInterpreter::RUNNING;
+      Execute(frames_.back().code, frames_.back().ret_pc, 1);
+    }
+    return state_;
+  }
+
+  virtual void Pause() { UNIMPLEMENTED(); }
+
+  virtual void Reset() {
+    TRACE("----- RESET -----\n");
+    stack_.clear();
+    frames_.clear();
+    state_ = WasmInterpreter::STOPPED;
+    trap_reason_ = kTrapCount;
+  }
+
+  virtual int GetFrameCount() { return static_cast<int>(frames_.size()); }
+
+  virtual const WasmFrame* GetFrame(int index) {
+    UNIMPLEMENTED();
+    return nullptr;
+  }
+
+  virtual WasmFrame* GetMutableFrame(int index) {
+    UNIMPLEMENTED();
+    return nullptr;
+  }
+
+  virtual WasmVal GetReturnValue() {
+    if (state_ == WasmInterpreter::TRAPPED) return WasmVal(0xdeadbeef);
+    CHECK_EQ(WasmInterpreter::FINISHED, state_);
+    CHECK_EQ(1, stack_.size());
+    return stack_[0];
+  }
+
+  virtual pc_t GetBreakpointPc() { return break_pc_; }
+
+  bool Terminated() {
+    return state_ == WasmInterpreter::TRAPPED ||
+           state_ == WasmInterpreter::FINISHED;
+  }
+
+ private:
+  // Entries on the stack of functions being evaluated.
+  struct Frame {
+    InterpreterCode* code;
+    pc_t call_pc;
+    pc_t ret_pc;
+    sp_t sp;
+
+    // Limit of parameters.
+    sp_t plimit() { return sp + code->function->sig->parameter_count(); }
+    // Limit of locals.
+    sp_t llimit() { return plimit() + code->locals.total_local_count; }
+  };
+
+  CodeMap* codemap_;
+  WasmModuleInstance* instance_;
+  ZoneVector<WasmVal> stack_;
+  ZoneVector<Frame> frames_;
+  WasmInterpreter::State state_;
+  pc_t break_pc_;
+  TrapReason trap_reason_;
+
+  CodeMap* codemap() { return codemap_; }
+  WasmModuleInstance* instance() { return instance_; }
+  const WasmModule* module() { return instance_->module; }
+
+  void DoTrap(TrapReason trap, pc_t pc) {
+    state_ = WasmInterpreter::TRAPPED;
+    trap_reason_ = trap;
+    CommitPc(pc);
+  }
+
+  // Push a frame with arguments already on the stack.
+  void PushFrame(InterpreterCode* code, pc_t call_pc, pc_t ret_pc) {
+    CHECK_NOT_NULL(code);
+    DCHECK(!frames_.empty());
+    frames_.back().call_pc = call_pc;
+    frames_.back().ret_pc = ret_pc;
+    size_t arity = code->function->sig->parameter_count();
+    DCHECK_GE(stack_.size(), arity);
+    // The parameters will overlap the arguments already on the stack.
+    frames_.push_back({code, 0, 0, stack_.size() - arity});
+    frames_.back().ret_pc = InitLocals(code);
+    TRACE("  => push func#%u @%zu\n", code->function->func_index,
+          frames_.back().ret_pc);
+  }
+
+  pc_t InitLocals(InterpreterCode* code) {
+    for (auto p : code->locals.local_types) {
+      WasmVal val;
+      switch (p.first) {
+        case kAstI32:
+          val = WasmVal(static_cast<int32_t>(0));
+          break;
+        case kAstI64:
+          val = WasmVal(static_cast<int64_t>(0));
+          break;
+        case kAstF32:
+          val = WasmVal(static_cast<float>(0));
+          break;
+        case kAstF64:
+          val = WasmVal(static_cast<double>(0));
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      stack_.insert(stack_.end(), p.second, val);
+    }
+    return code->locals.decls_encoded_size;
+  }
+
+  void CommitPc(pc_t pc) {
+    if (!frames_.empty()) {
+      frames_.back().ret_pc = pc;
+    }
+  }
+
+  bool SkipBreakpoint(InterpreterCode* code, pc_t pc) {
+    if (pc == break_pc_) {
+      break_pc_ = kInvalidPc;
+      return true;
+    }
+    return false;
+  }
+
+  bool DoReturn(InterpreterCode** code, pc_t* pc, pc_t* limit, WasmVal val) {
+    DCHECK_GT(frames_.size(), 0u);
+    stack_.resize(frames_.back().sp);
+    frames_.pop_back();
+    if (frames_.size() == 0) {
+      // A return from the top frame terminates the execution.
+      state_ = WasmInterpreter::FINISHED;
+      stack_.clear();
+      stack_.push_back(val);
+      TRACE("  => finish\n");
+      return false;
+    } else {
+      // Return to caller frame.
+      Frame* top = &frames_.back();
+      *code = top->code;
+      *pc = top->ret_pc;
+      *limit = top->code->end - top->code->start;
+      if (top->code->start[top->call_pc] == kExprCallIndirect ||
+          (top->code->orig_start &&
+           top->code->orig_start[top->call_pc] == kExprCallIndirect)) {
+        // UGLY: An indirect call has the additional function index on the
+        // stack.
+        stack_.pop_back();
+      }
+      TRACE("  => pop func#%u @%zu\n", (*code)->function->func_index, *pc);
+
+      stack_.push_back(val);
+      return true;
+    }
+  }
+
+  void DoCall(InterpreterCode* target, pc_t* pc, pc_t ret_pc, pc_t* limit) {
+    PushFrame(target, *pc, ret_pc);
+    *pc = frames_.back().ret_pc;
+    *limit = target->end - target->start;
+  }
+
+  // Adjust the program counter {pc} and the stack contents according to the
+  // code's precomputed control transfer map. Returns the difference between
+  // the new pc and the old pc.
+  int DoControlTransfer(InterpreterCode* code, pc_t pc) {
+    auto target = code->targets->Lookup(pc);
+    switch (target.action) {
+      case ControlTransfer::kNoAction:
+        TRACE("  action [sp-%u]\n", target.spdiff);
+        PopN(target.spdiff);
+        break;
+      case ControlTransfer::kPopAndRepush: {
+        WasmVal val = Pop();
+        TRACE("  action [pop x, sp-%u, push x]\n", target.spdiff - 1);
+        DCHECK_GE(target.spdiff, 1u);
+        PopN(target.spdiff - 1);
+        Push(pc, val);
+        break;
+      }
+      case ControlTransfer::kPushVoid:
+        TRACE("  action [sp-%u, push void]\n", target.spdiff);
+        PopN(target.spdiff);
+        Push(pc, WasmVal());
+        break;
+    }
+    return target.pcdiff;
+  }
+
+  void Execute(InterpreterCode* code, pc_t pc, int max) {
+    Decoder decoder(code->start, code->end);
+    pc_t limit = code->end - code->start;
+    while (true) {
+      if (max-- <= 0) {
+        // Maximum number of instructions reached.
+        state_ = WasmInterpreter::PAUSED;
+        return CommitPc(pc);
+      }
+
+      if (pc >= limit) {
+        // Fell off end of code; do an implicit return.
+        TRACE("@%-3zu: ImplicitReturn\n", pc);
+        WasmVal val = PopArity(code->function->sig->return_count());
+        if (!DoReturn(&code, &pc, &limit, val)) return;
+        decoder.Reset(code->start, code->end);
+        continue;
+      }
+
+      const char* skip = "        ";
+      int len = 1;
+      byte opcode = code->start[pc];
+      byte orig = opcode;
+      if (opcode == kInternalBreakpoint) {
+        orig = code->orig_start[pc];
+        if (SkipBreakpoint(code, pc)) {
+          // skip breakpoint by switching on original code.
+          skip = "[skip]  ";
+        } else {
+          state_ = WasmInterpreter::PAUSED;
+          TRACE("@%-3zu: [break] %-24s:", pc,
+                WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(orig)));
+          TraceValueStack();
+          TRACE("\n");
+          break_pc_ = pc;
+          return CommitPc(pc);
+        }
+      }
+
+      USE(skip);
+      TRACE("@%-3zu: %s%-24s:", pc, skip,
+            WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(orig)));
+      TraceValueStack();
+      TRACE("\n");
+
+      switch (orig) {
+        case kExprNop:
+          Push(pc, WasmVal());
+          break;
+        case kExprBlock:
+        case kExprLoop: {
+          // Do nothing.
+          break;
+        }
+        case kExprIf: {
+          WasmVal cond = Pop();
+          bool is_true = cond.to<uint32_t>() != 0;
+          if (is_true) {
+            // fall through to the true block.
+            TRACE("  true => fallthrough\n");
+          } else {
+            len = DoControlTransfer(code, pc);
+            TRACE("  false => @%zu\n", pc + len);
+          }
+          break;
+        }
+        case kExprElse: {
+          len = DoControlTransfer(code, pc);
+          TRACE("  end => @%zu\n", pc + len);
+          break;
+        }
+        case kExprSelect: {
+          WasmVal cond = Pop();
+          WasmVal fval = Pop();
+          WasmVal tval = Pop();
+          Push(pc, cond.to<int32_t>() != 0 ? tval : fval);
+          break;
+        }
+        case kExprBr: {
+          BreakDepthOperand operand(&decoder, code->at(pc));
+          WasmVal val = PopArity(operand.arity);
+          len = DoControlTransfer(code, pc);
+          TRACE("  br => @%zu\n", pc + len);
+          if (operand.arity > 0) Push(pc, val);
+          break;
+        }
+        case kExprBrIf: {
+          BreakDepthOperand operand(&decoder, code->at(pc));
+          WasmVal cond = Pop();
+          WasmVal val = PopArity(operand.arity);
+          bool is_true = cond.to<uint32_t>() != 0;
+          if (is_true) {
+            len = DoControlTransfer(code, pc);
+            TRACE("  br_if => @%zu\n", pc + len);
+            if (operand.arity > 0) Push(pc, val);
+          } else {
+            TRACE("  false => fallthrough\n");
+            len = 1 + operand.length;
+            Push(pc, WasmVal());
+          }
+          break;
+        }
+        case kExprBrTable: {
+          BranchTableOperand operand(&decoder, code->at(pc));
+          uint32_t key = Pop().to<uint32_t>();
+          WasmVal val = PopArity(operand.arity);
+          if (key >= operand.table_count) key = operand.table_count;
+          len = DoControlTransfer(code, pc + key) + key;
+          TRACE("  br[%u] => @%zu\n", key, pc + len);
+          if (operand.arity > 0) Push(pc, val);
+          break;
+        }
+        case kExprReturn: {
+          ReturnArityOperand operand(&decoder, code->at(pc));
+          WasmVal val = PopArity(operand.arity);
+          if (!DoReturn(&code, &pc, &limit, val)) return;
+          decoder.Reset(code->start, code->end);
+          continue;
+        }
+        case kExprUnreachable: {
+          DoTrap(kTrapUnreachable, pc);
+          return CommitPc(pc);
+        }
+        case kExprEnd: {
+          len = DoControlTransfer(code, pc);
+          DCHECK_EQ(1, len);
+          break;
+        }
+        case kExprI8Const: {
+          ImmI8Operand operand(&decoder, code->at(pc));
+          Push(pc, WasmVal(operand.value));
+          len = 1 + operand.length;
+          break;
+        }
+        case kExprI32Const: {
+          ImmI32Operand operand(&decoder, code->at(pc));
+          Push(pc, WasmVal(operand.value));
+          len = 1 + operand.length;
+          break;
+        }
+        case kExprI64Const: {
+          ImmI64Operand operand(&decoder, code->at(pc));
+          Push(pc, WasmVal(operand.value));
+          len = 1 + operand.length;
+          break;
+        }
+        case kExprF32Const: {
+          ImmF32Operand operand(&decoder, code->at(pc));
+          Push(pc, WasmVal(operand.value));
+          len = 1 + operand.length;
+          break;
+        }
+        case kExprF64Const: {
+          ImmF64Operand operand(&decoder, code->at(pc));
+          Push(pc, WasmVal(operand.value));
+          len = 1 + operand.length;
+          break;
+        }
+        case kExprGetLocal: {
+          LocalIndexOperand operand(&decoder, code->at(pc));
+          Push(pc, stack_[frames_.back().sp + operand.index]);
+          len = 1 + operand.length;
+          break;
+        }
+        case kExprSetLocal: {
+          LocalIndexOperand operand(&decoder, code->at(pc));
+          WasmVal val = Pop();
+          stack_[frames_.back().sp + operand.index] = val;
+          Push(pc, val);
+          len = 1 + operand.length;
+          break;
+        }
+        case kExprCallFunction: {
+          CallFunctionOperand operand(&decoder, code->at(pc));
+          InterpreterCode* target = codemap()->GetCode(operand.index);
+          DoCall(target, &pc, pc + 1 + operand.length, &limit);
+          code = target;
+          decoder.Reset(code->start, code->end);
+          continue;
+        }
+        case kExprCallIndirect: {
+          CallIndirectOperand operand(&decoder, code->at(pc));
+          size_t index = stack_.size() - operand.arity - 1;
+          DCHECK_LT(index, stack_.size());
+          uint32_t table_index = stack_[index].to<uint32_t>();
+          if (table_index >= module()->function_table.size()) {
+            return DoTrap(kTrapFuncInvalid, pc);
+          }
+          uint16_t function_index = module()->function_table[table_index];
+          InterpreterCode* target = codemap()->GetCode(function_index);
+          DCHECK(target);
+          if (target->function->sig_index != operand.index) {
+            return DoTrap(kTrapFuncSigMismatch, pc);
+          }
+
+          DoCall(target, &pc, pc + 1 + operand.length, &limit);
+          code = target;
+          decoder.Reset(code->start, code->end);
+          continue;
+        }
+        case kExprCallImport: {
+          UNIMPLEMENTED();
+          break;
+        }
+        case kExprLoadGlobal: {
+          GlobalIndexOperand operand(&decoder, code->at(pc));
+          const WasmGlobal* global = &module()->globals[operand.index];
+          byte* ptr = instance()->globals_start + global->offset;
+          MachineType type = global->type;
+          WasmVal val;
+          if (type == MachineType::Int8()) {
+            val =
+                WasmVal(static_cast<int32_t>(*reinterpret_cast<int8_t*>(ptr)));
+          } else if (type == MachineType::Uint8()) {
+            val =
+                WasmVal(static_cast<int32_t>(*reinterpret_cast<uint8_t*>(ptr)));
+          } else if (type == MachineType::Int16()) {
+            val =
+                WasmVal(static_cast<int32_t>(*reinterpret_cast<int16_t*>(ptr)));
+          } else if (type == MachineType::Uint16()) {
+            val = WasmVal(
+                static_cast<int32_t>(*reinterpret_cast<uint16_t*>(ptr)));
+          } else if (type == MachineType::Int32()) {
+            val = WasmVal(*reinterpret_cast<int32_t*>(ptr));
+          } else if (type == MachineType::Uint32()) {
+            val = WasmVal(*reinterpret_cast<uint32_t*>(ptr));
+          } else if (type == MachineType::Int64()) {
+            val = WasmVal(*reinterpret_cast<int64_t*>(ptr));
+          } else if (type == MachineType::Uint64()) {
+            val = WasmVal(*reinterpret_cast<uint64_t*>(ptr));
+          } else if (type == MachineType::Float32()) {
+            val = WasmVal(*reinterpret_cast<float*>(ptr));
+          } else if (type == MachineType::Float64()) {
+            val = WasmVal(*reinterpret_cast<double*>(ptr));
+          } else {
+            UNREACHABLE();
+          }
+          Push(pc, val);
+          len = 1 + operand.length;
+          break;
+        }
+        case kExprStoreGlobal: {
+          GlobalIndexOperand operand(&decoder, code->at(pc));
+          const WasmGlobal* global = &module()->globals[operand.index];
+          byte* ptr = instance()->globals_start + global->offset;
+          MachineType type = global->type;
+          WasmVal val = Pop();
+          if (type == MachineType::Int8()) {
+            *reinterpret_cast<int8_t*>(ptr) =
+                static_cast<int8_t>(val.to<int32_t>());
+          } else if (type == MachineType::Uint8()) {
+            *reinterpret_cast<uint8_t*>(ptr) =
+                static_cast<uint8_t>(val.to<uint32_t>());
+          } else if (type == MachineType::Int16()) {
+            *reinterpret_cast<int16_t*>(ptr) =
+                static_cast<int16_t>(val.to<int32_t>());
+          } else if (type == MachineType::Uint16()) {
+            *reinterpret_cast<uint16_t*>(ptr) =
+                static_cast<uint16_t>(val.to<uint32_t>());
+          } else if (type == MachineType::Int32()) {
+            *reinterpret_cast<int32_t*>(ptr) = val.to<int32_t>();
+          } else if (type == MachineType::Uint32()) {
+            *reinterpret_cast<uint32_t*>(ptr) = val.to<uint32_t>();
+          } else if (type == MachineType::Int64()) {
+            *reinterpret_cast<int64_t*>(ptr) = val.to<int64_t>();
+          } else if (type == MachineType::Uint64()) {
+            *reinterpret_cast<uint64_t*>(ptr) = val.to<uint64_t>();
+          } else if (type == MachineType::Float32()) {
+            *reinterpret_cast<float*>(ptr) = val.to<float>();
+          } else if (type == MachineType::Float64()) {
+            *reinterpret_cast<double*>(ptr) = val.to<double>();
+          } else {
+            UNREACHABLE();
+          }
+          Push(pc, val);
+          len = 1 + operand.length;
+          break;
+        }
+
+#define LOAD_CASE(name, ctype, mtype)                                    \
+  case kExpr##name: {                                                    \
+    MemoryAccessOperand operand(&decoder, code->at(pc));                 \
+    uint32_t index = Pop().to<uint32_t>();                               \
+    size_t effective_mem_size = instance()->mem_size - sizeof(mtype);    \
+    if (operand.offset > effective_mem_size ||                           \
+        index > (effective_mem_size - operand.offset)) {                 \
+      return DoTrap(kTrapMemOutOfBounds, pc);                            \
+    }                                                                    \
+    byte* addr = instance()->mem_start + operand.offset + index;         \
+    WasmVal result(static_cast<ctype>(ReadUnalignedValue<mtype>(addr))); \
+    Push(pc, result);                                                    \
+    len = 1 + operand.length;                                            \
+    break;                                                               \
+  }
+
+          LOAD_CASE(I32LoadMem8S, int32_t, int8_t);
+          LOAD_CASE(I32LoadMem8U, int32_t, uint8_t);
+          LOAD_CASE(I32LoadMem16S, int32_t, int16_t);
+          LOAD_CASE(I32LoadMem16U, int32_t, uint16_t);
+          LOAD_CASE(I64LoadMem8S, int64_t, int8_t);
+          LOAD_CASE(I64LoadMem8U, int64_t, uint8_t);
+          LOAD_CASE(I64LoadMem16S, int64_t, int16_t);
+          LOAD_CASE(I64LoadMem16U, int64_t, uint16_t);
+          LOAD_CASE(I64LoadMem32S, int64_t, int32_t);
+          LOAD_CASE(I64LoadMem32U, int64_t, uint32_t);
+          LOAD_CASE(I32LoadMem, int32_t, int32_t);
+          LOAD_CASE(I64LoadMem, int64_t, int64_t);
+          LOAD_CASE(F32LoadMem, float, float);
+          LOAD_CASE(F64LoadMem, double, double);
+#undef LOAD_CASE
+
+#define STORE_CASE(name, ctype, mtype)                                     \
+  case kExpr##name: {                                                      \
+    MemoryAccessOperand operand(&decoder, code->at(pc));                   \
+    WasmVal val = Pop();                                                   \
+    uint32_t index = Pop().to<uint32_t>();                                 \
+    size_t effective_mem_size = instance()->mem_size - sizeof(mtype);      \
+    if (operand.offset > effective_mem_size ||                             \
+        index > (effective_mem_size - operand.offset)) {                   \
+      return DoTrap(kTrapMemOutOfBounds, pc);                              \
+    }                                                                      \
+    byte* addr = instance()->mem_start + operand.offset + index;           \
+    WriteUnalignedValue<mtype>(addr, static_cast<mtype>(val.to<ctype>())); \
+    Push(pc, val);                                                         \
+    len = 1 + operand.length;                                              \
+    break;                                                                 \
+  }
+
+          STORE_CASE(I32StoreMem8, int32_t, int8_t);
+          STORE_CASE(I32StoreMem16, int32_t, int16_t);
+          STORE_CASE(I64StoreMem8, int64_t, int8_t);
+          STORE_CASE(I64StoreMem16, int64_t, int16_t);
+          STORE_CASE(I64StoreMem32, int64_t, int32_t);
+          STORE_CASE(I32StoreMem, int32_t, int32_t);
+          STORE_CASE(I64StoreMem, int64_t, int64_t);
+          STORE_CASE(F32StoreMem, float, float);
+          STORE_CASE(F64StoreMem, double, double);
+#undef STORE_CASE
+
+#define ASMJS_LOAD_CASE(name, ctype, mtype, defval)                 \
+  case kExpr##name: {                                               \
+    uint32_t index = Pop().to<uint32_t>();                          \
+    ctype result;                                                   \
+    if (index >= (instance()->mem_size - sizeof(mtype))) {          \
+      result = defval;                                              \
+    } else {                                                        \
+      byte* addr = instance()->mem_start + index;                   \
+      /* TODO(titzer): alignment for asmjs load mem? */             \
+      result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \
+    }                                                               \
+    Push(pc, WasmVal(result));                                      \
+    break;                                                          \
+  }
+          ASMJS_LOAD_CASE(I32AsmjsLoadMem8S, int32_t, int8_t, 0);
+          ASMJS_LOAD_CASE(I32AsmjsLoadMem8U, int32_t, uint8_t, 0);
+          ASMJS_LOAD_CASE(I32AsmjsLoadMem16S, int32_t, int16_t, 0);
+          ASMJS_LOAD_CASE(I32AsmjsLoadMem16U, int32_t, uint16_t, 0);
+          ASMJS_LOAD_CASE(I32AsmjsLoadMem, int32_t, int32_t, 0);
+          ASMJS_LOAD_CASE(F32AsmjsLoadMem, float, float,
+                          std::numeric_limits<float>::quiet_NaN());
+          ASMJS_LOAD_CASE(F64AsmjsLoadMem, double, double,
+                          std::numeric_limits<double>::quiet_NaN());
+#undef ASMJS_LOAD_CASE
+
+#define ASMJS_STORE_CASE(name, ctype, mtype)                                   \
+  case kExpr##name: {                                                          \
+    WasmVal val = Pop();                                                       \
+    uint32_t index = Pop().to<uint32_t>();                                     \
+    if (index < (instance()->mem_size - sizeof(mtype))) {                      \
+      byte* addr = instance()->mem_start + index;                              \
+      /* TODO(titzer): alignment for asmjs store mem? */                       \
+      *(reinterpret_cast<mtype*>(addr)) = static_cast<mtype>(val.to<ctype>()); \
+    }                                                                          \
+    Push(pc, val);                                                             \
+    break;                                                                     \
+  }
+
+          ASMJS_STORE_CASE(I32AsmjsStoreMem8, int32_t, int8_t);
+          ASMJS_STORE_CASE(I32AsmjsStoreMem16, int32_t, int16_t);
+          ASMJS_STORE_CASE(I32AsmjsStoreMem, int32_t, int32_t);
+          ASMJS_STORE_CASE(F32AsmjsStoreMem, float, float);
+          ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
+#undef ASMJS_STORE_CASE
+
+        case kExprMemorySize: {
+          Push(pc, WasmVal(static_cast<uint32_t>(instance()->mem_size)));
+          break;
+        }
+#define EXECUTE_SIMPLE_BINOP(name, ctype, op)             \
+  case kExpr##name: {                                     \
+    WasmVal rval = Pop();                                 \
+    WasmVal lval = Pop();                                 \
+    WasmVal result(lval.to<ctype>() op rval.to<ctype>()); \
+    Push(pc, result);                                     \
+    break;                                                \
+  }
+          FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
+#undef EXECUTE_SIMPLE_BINOP
+
+#define EXECUTE_OTHER_BINOP(name, ctype)              \
+  case kExpr##name: {                                 \
+    TrapReason trap = kTrapCount;                     \
+    volatile ctype rval = Pop().to<ctype>();          \
+    volatile ctype lval = Pop().to<ctype>();          \
+    WasmVal result(Execute##name(lval, rval, &trap)); \
+    if (trap != kTrapCount) return DoTrap(trap, pc);  \
+    Push(pc, result);                                 \
+    break;                                            \
+  }
+          FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
+#undef EXECUTE_OTHER_BINOP
+
+#define EXECUTE_OTHER_UNOP(name, ctype)              \
+  case kExpr##name: {                                \
+    TrapReason trap = kTrapCount;                    \
+    volatile ctype val = Pop().to<ctype>();          \
+    WasmVal result(Execute##name(val, &trap));       \
+    if (trap != kTrapCount) return DoTrap(trap, pc); \
+    Push(pc, result);                                \
+    break;                                           \
+  }
+          FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
+#undef EXECUTE_OTHER_UNOP
+
+        default:
+          V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
+                   code->start[pc], OpcodeName(code->start[pc]));
+          UNREACHABLE();
+      }
+
+      pc += len;
+    }
+    UNREACHABLE();  // above decoding loop should run forever.
+  }
+
+  WasmVal Pop() {
+    DCHECK_GT(stack_.size(), 0u);
+    DCHECK_GT(frames_.size(), 0u);
+    DCHECK_GT(stack_.size(), frames_.back().llimit());  // can't pop into locals
+    WasmVal val = stack_.back();
+    stack_.pop_back();
+    return val;
+  }
+
+  void PopN(int n) {
+    DCHECK_GE(stack_.size(), static_cast<size_t>(n));
+    DCHECK_GT(frames_.size(), 0u);
+    size_t nsize = stack_.size() - n;
+    DCHECK_GE(nsize, frames_.back().llimit());  // can't pop into locals
+    stack_.resize(nsize);
+  }
+
+  WasmVal PopArity(size_t arity) {
+    if (arity == 0) return WasmVal();
+    CHECK_EQ(1, arity);
+    return Pop();
+  }
+
+  void Push(pc_t pc, WasmVal val) {
+    // TODO(titzer): store PC as well?
+    stack_.push_back(val);
+  }
+
+  void TraceStack(const char* phase, pc_t pc) {
+    if (FLAG_trace_wasm_interpreter) {
+      PrintF("%s @%zu", phase, pc);
+      UNIMPLEMENTED();
+      PrintF("\n");
+    }
+  }
+
+  void TraceValueStack() {
+    Frame* top = frames_.size() > 0 ? &frames_.back() : nullptr;
+    sp_t sp = top ? top->sp : 0;
+    sp_t plimit = top ? top->plimit() : 0;
+    sp_t llimit = top ? top->llimit() : 0;
+    if (FLAG_trace_wasm_interpreter) {
+      for (size_t i = sp; i < stack_.size(); ++i) {
+        if (i < plimit)
+          PrintF(" p%zu:", i);
+        else if (i < llimit)
+          PrintF(" l%zu:", i);
+        else
+          PrintF(" s%zu:", i);
+        WasmVal val = stack_[i];
+        switch (val.type) {
+          case kAstI32:
+            PrintF("i32:%d", val.to<int32_t>());
+            break;
+          case kAstI64:
+            PrintF("i64:%" PRId64 "", val.to<int64_t>());
+            break;
+          case kAstF32:
+            PrintF("f32:%f", val.to<float>());
+            break;
+          case kAstF64:
+            PrintF("f64:%lf", val.to<double>());
+            break;
+          case kAstStmt:
+            PrintF("void");
+            break;
+          default:
+            UNREACHABLE();
+            break;
+        }
+      }
+    }
+  }
+};
+
+//============================================================================
+// The implementation details of the interpreter.
+//============================================================================
+class WasmInterpreterInternals : public ZoneObject {
+ public:
+  WasmModuleInstance* instance_;
+  CodeMap codemap_;
+  ZoneVector<ThreadImpl*> threads_;
+
+  WasmInterpreterInternals(Zone* zone, WasmModuleInstance* instance)
+      : instance_(instance),
+        codemap_(instance_ ? instance_->module : nullptr, zone),
+        threads_(zone) {
+    threads_.push_back(new ThreadImpl(zone, &codemap_, instance));
+  }
+
+  void Delete() {
+    // TODO(titzer): CFI doesn't like threads in the ZoneVector.
+    for (auto t : threads_) delete t;
+    threads_.resize(0);
+  }
+};
+
+//============================================================================
+// Implementation of the public interface of the interpreter.
+//============================================================================
+WasmInterpreter::WasmInterpreter(WasmModuleInstance* instance,
+                                 base::AccountingAllocator* allocator)
+    : zone_(allocator),
+      internals_(new (&zone_) WasmInterpreterInternals(&zone_, instance)) {}
+
+WasmInterpreter::~WasmInterpreter() { internals_->Delete(); }
+
+void WasmInterpreter::Run() { internals_->threads_[0]->Run(); }
+
+void WasmInterpreter::Pause() { internals_->threads_[0]->Pause(); }
+
+bool WasmInterpreter::SetBreakpoint(const WasmFunction* function, pc_t pc,
+                                    bool enabled) {
+  InterpreterCode* code = internals_->codemap_.FindCode(function);
+  if (!code) return false;
+  size_t size = static_cast<size_t>(code->end - code->start);
+  // Check bounds for {pc}.
+  if (pc < code->locals.decls_encoded_size || pc >= size) return false;
+  // Make a copy of the code before enabling a breakpoint.
+  if (enabled && code->orig_start == code->start) {
+    code->start = reinterpret_cast<byte*>(zone_.New(size));
+    memcpy(code->start, code->orig_start, size);
+    code->end = code->start + size;
+  }
+  bool prev = code->start[pc] == kInternalBreakpoint;
+  if (enabled) {
+    code->start[pc] = kInternalBreakpoint;
+  } else {
+    code->start[pc] = code->orig_start[pc];
+  }
+  return prev;
+}
+
+bool WasmInterpreter::GetBreakpoint(const WasmFunction* function, pc_t pc) {
+  InterpreterCode* code = internals_->codemap_.FindCode(function);
+  if (!code) return false;
+  size_t size = static_cast<size_t>(code->end - code->start);
+  // Check bounds for {pc}.
+  if (pc < code->locals.decls_encoded_size || pc >= size) return false;
+  // Check if a breakpoint is present at that place in the code.
+  return code->start[pc] == kInternalBreakpoint;
+}
+
+bool WasmInterpreter::SetTracing(const WasmFunction* function, bool enabled) {
+  UNIMPLEMENTED();
+  return false;
+}
+
+int WasmInterpreter::GetThreadCount() {
+  return 1;  // only one thread for now.
+}
+
+WasmInterpreter::Thread* WasmInterpreter::GetThread(int id) {
+  CHECK_EQ(0, id);  // only one thread for now.
+  return internals_->threads_[id];
+}
+
+WasmVal WasmInterpreter::GetLocalVal(const WasmFrame* frame, int index) {
+  CHECK_GE(index, 0);
+  UNIMPLEMENTED();
+  WasmVal none;
+  none.type = kAstStmt;
+  return none;
+}
+
+WasmVal WasmInterpreter::GetExprVal(const WasmFrame* frame, int pc) {
+  UNIMPLEMENTED();
+  WasmVal none;
+  none.type = kAstStmt;
+  return none;
+}
+
+void WasmInterpreter::SetLocalVal(WasmFrame* frame, int index, WasmVal val) {
+  UNIMPLEMENTED();
+}
+
+void WasmInterpreter::SetExprVal(WasmFrame* frame, int pc, WasmVal val) {
+  UNIMPLEMENTED();
+}
+
+size_t WasmInterpreter::GetMemorySize() {
+  return internals_->instance_->mem_size;
+}
+
+WasmVal WasmInterpreter::ReadMemory(size_t offset) {
+  UNIMPLEMENTED();
+  return WasmVal();
+}
+
+void WasmInterpreter::WriteMemory(size_t offset, WasmVal val) {
+  UNIMPLEMENTED();
+}
+
+int WasmInterpreter::AddFunctionForTesting(const WasmFunction* function) {
+  return internals_->codemap_.AddFunction(function, nullptr, nullptr);
+}
+
+bool WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
+                                                const byte* start,
+                                                const byte* end) {
+  return internals_->codemap_.SetFunctionCode(function, start, end);
+}
+
+ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
+    Zone* zone, const byte* start, const byte* end) {
+  ControlTransfers targets(zone, 0, start, end);
+  return targets.map_;
+}
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
diff --git a/src/wasm/wasm-interpreter.h b/src/wasm/wasm-interpreter.h
new file mode 100644
index 0000000..b106a20
--- /dev/null
+++ b/src/wasm/wasm-interpreter.h
@@ -0,0 +1,209 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_INTERPRETER_H_
+#define V8_WASM_INTERPRETER_H_
+
+#include "src/wasm/wasm-opcodes.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace base {
+class AccountingAllocator;
+}
+
+namespace internal {
+namespace wasm {
+
+// forward declarations.
+struct WasmFunction;
+struct WasmModuleInstance;
+class WasmInterpreterInternals;
+
+typedef size_t pc_t;
+typedef size_t sp_t;
+typedef int32_t pcdiff_t;
+typedef uint32_t spdiff_t;
+
+const pc_t kInvalidPc = 0x80000000;
+
+// Visible for testing. A {ControlTransfer} helps the interpreter figure out
+// the target program counter and stack manipulations for a branch.
+struct ControlTransfer {
+  enum StackAction { kNoAction, kPopAndRepush, kPushVoid };
+  pcdiff_t pcdiff;  // adjustment to the program counter (positive or negative).
+  spdiff_t spdiff;  // number of elements to pop off the stack.
+  StackAction action;  // action to perform on the stack.
+};
+typedef ZoneMap<pc_t, ControlTransfer> ControlTransferMap;
+
+// Macro for defining union members.
+#define FOREACH_UNION_MEMBER(V) \
+  V(i32, kAstI32, int32_t)      \
+  V(u32, kAstI32, uint32_t)     \
+  V(i64, kAstI64, int64_t)      \
+  V(u64, kAstI64, uint64_t)     \
+  V(f32, kAstF32, float)        \
+  V(f64, kAstF64, double)
+
+// Representation of values within the interpreter.
+struct WasmVal {
+  LocalType type;
+  union {
+#define DECLARE_FIELD(field, localtype, ctype) ctype field;
+    FOREACH_UNION_MEMBER(DECLARE_FIELD)
+#undef DECLARE_FIELD
+  } val;
+
+  WasmVal() : type(kAstStmt) {}
+
+#define DECLARE_CONSTRUCTOR(field, localtype, ctype) \
+  explicit WasmVal(ctype v) : type(localtype) { val.field = v; }
+  FOREACH_UNION_MEMBER(DECLARE_CONSTRUCTOR)
+#undef DECLARE_CONSTRUCTOR
+
+  template <typename T>
+  T to() {
+    UNREACHABLE();
+  }
+};
+
+#define DECLARE_CAST(field, localtype, ctype) \
+  template <>                                 \
+  inline ctype WasmVal::to() {                \
+    CHECK_EQ(localtype, type);                \
+    return val.field;                         \
+  }
+FOREACH_UNION_MEMBER(DECLARE_CAST)
+#undef DECLARE_CAST
+
+template <>
+inline void WasmVal::to() {
+  CHECK_EQ(kAstStmt, type);
+}
+
+// Representation of frames within the interpreter.
+class WasmFrame {
+ public:
+  const WasmFunction* function() const { return function_; }
+  int pc() const { return pc_; }
+
+ private:
+  friend class WasmInterpreter;
+
+  WasmFrame(const WasmFunction* function, int pc, int fp, int sp)
+      : function_(function), pc_(pc), fp_(fp), sp_(sp) {}
+
+  const WasmFunction* function_;
+  int pc_;
+  int fp_;
+  int sp_;
+};
+
+// An interpreter capable of executing WASM.
+class WasmInterpreter {
+ public:
+  // State machine for a Thread:
+  //                       +---------------Run()-----------+
+  //                       V                               |
+  // STOPPED ---Run()-->  RUNNING  ------Pause()-----+-> PAUSED  <------+
+  //                       | | |                    /      |            |
+  //                       | | +---- Breakpoint ---+       +-- Step() --+
+  //                       | |
+  //                       | +------------ Trap --------------> TRAPPED
+  //                       +------------- Finish -------------> FINISHED
+  enum State { STOPPED, RUNNING, PAUSED, FINISHED, TRAPPED };
+
+  // Representation of a thread in the interpreter.
+  class Thread {
+   public:
+    // Execution control.
+    virtual State state() = 0;
+    virtual void PushFrame(const WasmFunction* function, WasmVal* args) = 0;
+    virtual State Run() = 0;
+    virtual State Step() = 0;
+    virtual void Pause() = 0;
+    virtual void Reset() = 0;
+    virtual ~Thread() {}
+
+    // Stack inspection and modification.
+    virtual pc_t GetBreakpointPc() = 0;
+    virtual int GetFrameCount() = 0;
+    virtual const WasmFrame* GetFrame(int index) = 0;
+    virtual WasmFrame* GetMutableFrame(int index) = 0;
+    virtual WasmVal GetReturnValue() = 0;
+
+    // Thread-specific breakpoints.
+    bool SetBreakpoint(const WasmFunction* function, int pc, bool enabled);
+    bool GetBreakpoint(const WasmFunction* function, int pc);
+  };
+
+  WasmInterpreter(WasmModuleInstance* instance,
+                  base::AccountingAllocator* allocator);
+  ~WasmInterpreter();
+
+  //==========================================================================
+  // Execution controls.
+  //==========================================================================
+  void Run();
+  void Pause();
+
+  // Set a breakpoint at {pc} in {function} to be {enabled}. Returns the
+  // previous state of the breakpoint at {pc}.
+  bool SetBreakpoint(const WasmFunction* function, pc_t pc, bool enabled);
+
+  // Gets the current state of the breakpoint at {function}.
+  bool GetBreakpoint(const WasmFunction* function, pc_t pc);
+
+  // Enable or disable tracing for {function}. Return the previous state.
+  bool SetTracing(const WasmFunction* function, bool enabled);
+
+  //==========================================================================
+  // Thread iteration and inspection.
+  //==========================================================================
+  int GetThreadCount();
+  Thread* GetThread(int id);
+
+  //==========================================================================
+  // Stack frame inspection.
+  //==========================================================================
+  WasmVal GetLocalVal(const WasmFrame* frame, int index);
+  WasmVal GetExprVal(const WasmFrame* frame, int pc);
+  void SetLocalVal(WasmFrame* frame, int index, WasmVal val);
+  void SetExprVal(WasmFrame* frame, int pc, WasmVal val);
+
+  //==========================================================================
+  // Memory access.
+  //==========================================================================
+  size_t GetMemorySize();
+  WasmVal ReadMemory(size_t offset);
+  void WriteMemory(size_t offset, WasmVal val);
+
+  //==========================================================================
+  // Testing functionality.
+  //==========================================================================
+  // Manually adds a function to this interpreter, returning the index of the
+  // function.
+  int AddFunctionForTesting(const WasmFunction* function);
+  // Manually adds code to the interpreter for the given function.
+  bool SetFunctionCodeForTesting(const WasmFunction* function,
+                                 const byte* start, const byte* end);
+
+  // Computes the control targets for the given bytecode as {pc offset, sp
+  // offset}
+  // pairs. Used internally in the interpreter, but exposed for testing.
+  static ControlTransferMap ComputeControlTransfersForTesting(Zone* zone,
+                                                              const byte* start,
+                                                              const byte* end);
+
+ private:
+  Zone zone_;
+  WasmInterpreterInternals* internals_;
+};
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_WASM_INTERPRETER_H_
diff --git a/src/wasm/wasm-js.cc b/src/wasm/wasm-js.cc
index 8a4b2ff..6dc1495 100644
--- a/src/wasm/wasm-js.cc
+++ b/src/wasm/wasm-js.cc
@@ -2,11 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/api.h"
 #include "src/api-natives.h"
+#include "src/api.h"
 #include "src/assert-scope.h"
 #include "src/ast/ast.h"
 #include "src/ast/scopes.h"
+#include "src/execution.h"
 #include "src/factory.h"
 #include "src/handles.h"
 #include "src/isolate.h"
@@ -34,30 +35,25 @@
   size_t size() { return static_cast<size_t>(end - start); }
 };
 
-RawBuffer GetRawBufferArgument(
-    ErrorThrower& thrower, const v8::FunctionCallbackInfo<v8::Value>& args) {
-  if (args.Length() < 1) {
-    thrower.Error("Argument 0 must be an array buffer");
-    return {nullptr, nullptr};
-  }
-
+RawBuffer GetRawBufferSource(
+    v8::Local<v8::Value> source, ErrorThrower* thrower) {
   const byte* start = nullptr;
   const byte* end = nullptr;
 
-  if (args[0]->IsArrayBuffer()) {
+  if (source->IsArrayBuffer()) {
     // A raw array buffer was passed.
-    Local<ArrayBuffer> buffer = Local<ArrayBuffer>::Cast(args[0]);
+    Local<ArrayBuffer> buffer = Local<ArrayBuffer>::Cast(source);
     ArrayBuffer::Contents contents = buffer->GetContents();
 
     start = reinterpret_cast<const byte*>(contents.Data());
     end = start + contents.ByteLength();
 
     if (start == nullptr || end == start) {
-      thrower.Error("ArrayBuffer argument is empty");
+      thrower->Error("ArrayBuffer argument is empty");
     }
-  } else if (args[0]->IsTypedArray()) {
+  } else if (source->IsTypedArray()) {
     // A TypedArray was passed.
-    Local<TypedArray> array = Local<TypedArray>::Cast(args[0]);
+    Local<TypedArray> array = Local<TypedArray>::Cast(source);
     Local<ArrayBuffer> buffer = array->Buffer();
 
     ArrayBuffer::Contents contents = buffer->GetContents();
@@ -67,10 +63,10 @@
     end = start + array->ByteLength();
 
     if (start == nullptr || end == start) {
-      thrower.Error("ArrayBuffer argument is empty");
+      thrower->Error("ArrayBuffer argument is empty");
     }
   } else {
-    thrower.Error("Argument 0 must be an ArrayBuffer or Uint8Array");
+    thrower->Error("Argument 0 must be an ArrayBuffer or Uint8Array");
   }
 
   return {start, end};
@@ -79,9 +75,13 @@
 void VerifyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
   HandleScope scope(args.GetIsolate());
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
-  ErrorThrower thrower(isolate, "WASM.verifyModule()");
+  ErrorThrower thrower(isolate, "Wasm.verifyModule()");
 
-  RawBuffer buffer = GetRawBufferArgument(thrower, args);
+  if (args.Length() < 1) {
+    thrower.Error("Argument 0 must be a buffer source");
+    return;
+  }
+  RawBuffer buffer = GetRawBufferSource(args[0], &thrower);
   if (thrower.error()) return;
 
   i::Zone zone(isolate->allocator());
@@ -99,9 +99,13 @@
 void VerifyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
   HandleScope scope(args.GetIsolate());
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
-  ErrorThrower thrower(isolate, "WASM.verifyFunction()");
+  ErrorThrower thrower(isolate, "Wasm.verifyFunction()");
 
-  RawBuffer buffer = GetRawBufferArgument(thrower, args);
+  if (args.Length() < 1) {
+    thrower.Error("Argument 0 must be a buffer source");
+    return;
+  }
+  RawBuffer buffer = GetRawBufferSource(args[0], &thrower);
   if (thrower.error()) return;
 
   internal::wasm::FunctionResult result;
@@ -120,8 +124,9 @@
   if (result.val) delete result.val;
 }
 
-v8::internal::wasm::WasmModuleIndex* TranslateAsmModule(
-    i::ParseInfo* info, i::Handle<i::Object> foreign, ErrorThrower* thrower) {
+v8::internal::wasm::ZoneBuffer* TranslateAsmModule(
+    i::ParseInfo* info, ErrorThrower* thrower,
+    i::Handle<i::FixedArray>* foreign_args) {
   info->set_global();
   info->set_lazy(false);
   info->set_allow_lazy_parsing(false);
@@ -149,33 +154,25 @@
     return nullptr;
   }
 
-  auto module =
-      v8::internal::wasm::AsmWasmBuilder(info->isolate(), info->zone(),
-                                         info->literal(), foreign, &typer)
-          .Run();
+  v8::internal::wasm::AsmWasmBuilder builder(info->isolate(), info->zone(),
+                                             info->literal(), &typer);
 
-  return module;
+  return builder.Run(foreign_args);
 }
 
-void InstantiateModuleCommon(const v8::FunctionCallbackInfo<v8::Value>& args,
-                             const byte* start, const byte* end,
-                             ErrorThrower* thrower,
-                             internal::wasm::ModuleOrigin origin) {
+i::MaybeHandle<i::JSObject> InstantiateModuleCommon(
+    const v8::FunctionCallbackInfo<v8::Value>& args, const byte* start,
+    const byte* end, ErrorThrower* thrower,
+    internal::wasm::ModuleOrigin origin = i::wasm::kWasmOrigin) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
 
-  i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
-  if (args.Length() > 2 && args[2]->IsArrayBuffer()) {
-    Local<Object> obj = Local<Object>::Cast(args[2]);
-    i::Handle<i::Object> mem_obj = v8::Utils::OpenHandle(*obj);
-    memory = i::Handle<i::JSArrayBuffer>(i::JSArrayBuffer::cast(*mem_obj));
-  }
-
   // Decode but avoid a redundant pass over function bodies for verification.
   // Verification will happen during compilation.
   i::Zone zone(isolate->allocator());
   internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
       isolate, &zone, start, end, false, origin);
 
+  i::MaybeHandle<i::JSObject> object;
   if (result.failed() && origin == internal::wasm::kAsmJsOrigin) {
     thrower->Error("Asm.js converted module failed to decode");
   } else if (result.failed()) {
@@ -188,21 +185,27 @@
       ffi = i::Handle<i::JSReceiver>::cast(v8::Utils::OpenHandle(*obj));
     }
 
-    i::MaybeHandle<i::JSObject> object =
-        result.val->Instantiate(isolate, ffi, memory);
+    i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
+    if (args.Length() > 2 && args[2]->IsArrayBuffer()) {
+      Local<Object> obj = Local<Object>::Cast(args[2]);
+      i::Handle<i::Object> mem_obj = v8::Utils::OpenHandle(*obj);
+      memory = i::Handle<i::JSArrayBuffer>(i::JSArrayBuffer::cast(*mem_obj));
+    }
 
+    object = result.val->Instantiate(isolate, ffi, memory);
     if (!object.is_null()) {
       args.GetReturnValue().Set(v8::Utils::ToLocal(object.ToHandleChecked()));
     }
   }
 
   if (result.val) delete result.val;
+  return object;
 }
 
 void InstantiateModuleFromAsm(const v8::FunctionCallbackInfo<v8::Value>& args) {
   HandleScope scope(args.GetIsolate());
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
-  ErrorThrower thrower(isolate, "WASM.instantiateModuleFromAsm()");
+  ErrorThrower thrower(isolate, "Wasm.instantiateModuleFromAsm()");
 
   if (!args[0]->IsString()) {
     thrower.Error("Asm module text should be a string");
@@ -221,25 +224,157 @@
     foreign = v8::Utils::OpenHandle(*local_foreign);
   }
 
-  auto module = TranslateAsmModule(&info, foreign, &thrower);
+  i::Handle<i::FixedArray> foreign_args;
+  auto module = TranslateAsmModule(&info, &thrower, &foreign_args);
   if (module == nullptr) {
     return;
   }
 
-  InstantiateModuleCommon(args, module->Begin(), module->End(), &thrower,
-                          internal::wasm::kAsmJsOrigin);
+  i::MaybeHandle<i::Object> maybe_module_object =
+      InstantiateModuleCommon(args, module->begin(), module->end(), &thrower,
+                              internal::wasm::kAsmJsOrigin);
+  if (maybe_module_object.is_null()) {
+    return;
+  }
+
+  i::Handle<i::Name> name =
+      factory->NewStringFromStaticChars("__foreign_init__");
+
+  i::Handle<i::Object> module_object = maybe_module_object.ToHandleChecked();
+  i::MaybeHandle<i::Object> maybe_init =
+      i::Object::GetProperty(module_object, name);
+  DCHECK(!maybe_init.is_null());
+
+  i::Handle<i::Object> init = maybe_init.ToHandleChecked();
+  i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
+  i::Handle<i::Object>* foreign_args_array =
+      new i::Handle<i::Object>[foreign_args->length()];
+  for (int j = 0; j < foreign_args->length(); j++) {
+    if (!foreign.is_null()) {
+      i::MaybeHandle<i::Name> name = i::Object::ToName(
+          isolate, i::Handle<i::Object>(foreign_args->get(j), isolate));
+      if (!name.is_null()) {
+        i::MaybeHandle<i::Object> val =
+            i::Object::GetProperty(foreign, name.ToHandleChecked());
+        if (!val.is_null()) {
+          foreign_args_array[j] = val.ToHandleChecked();
+          continue;
+        }
+      }
+    }
+    foreign_args_array[j] = undefined;
+  }
+  i::MaybeHandle<i::Object> retval = i::Execution::Call(
+      isolate, init, undefined, foreign_args->length(), foreign_args_array);
+  delete[] foreign_args_array;
+
+  if (retval.is_null()) {
+    thrower.Error(
+        "WASM.instantiateModuleFromAsm(): foreign init function failed");
+  }
 }
 
 void InstantiateModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
   HandleScope scope(args.GetIsolate());
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
-  ErrorThrower thrower(isolate, "WASM.instantiateModule()");
+  ErrorThrower thrower(isolate, "Wasm.instantiateModule()");
 
-  RawBuffer buffer = GetRawBufferArgument(thrower, args);
+  if (args.Length() < 1) {
+    thrower.Error("Argument 0 must be a buffer source");
+    return;
+  }
+  RawBuffer buffer = GetRawBufferSource(args[0], &thrower);
   if (buffer.start == nullptr) return;
 
-  InstantiateModuleCommon(args, buffer.start, buffer.end, &thrower,
-                          internal::wasm::kWasmOrigin);
+  InstantiateModuleCommon(args, buffer.start, buffer.end, &thrower);
+}
+
+
+static i::MaybeHandle<i::JSObject> CreateModuleObject(
+    v8::Isolate* isolate, const v8::Local<v8::Value> source,
+    ErrorThrower* thrower) {
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+
+  RawBuffer buffer = GetRawBufferSource(source, thrower);
+  if (buffer.start == nullptr) return i::MaybeHandle<i::JSObject>();
+
+  // TODO(rossberg): Once we can, do compilation here.
+  DCHECK(source->IsArrayBuffer() || source->IsTypedArray());
+  Local<Context> context = isolate->GetCurrentContext();
+  i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
+  i::Handle<i::JSFunction> module_cons(i_context->wasm_module_constructor());
+  i::Handle<i::JSObject> module_obj =
+      i_isolate->factory()->NewJSObject(module_cons);
+  i::Handle<i::Object> module_ref = Utils::OpenHandle(*source);
+  i::Handle<i::Symbol> module_sym(i_context->wasm_module_sym());
+  i::Object::SetProperty(module_obj, module_sym, module_ref, i::STRICT).Check();
+
+  return module_obj;
+}
+
+void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  v8::Isolate* isolate = args.GetIsolate();
+  HandleScope scope(isolate);
+  ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
+                       "WebAssembly.compile()");
+
+  if (args.Length() < 1) {
+    thrower.Error("Argument 0 must be a buffer source");
+    return;
+  }
+  i::MaybeHandle<i::JSObject> module_obj =
+      CreateModuleObject(isolate, args[0], &thrower);
+  if (module_obj.is_null()) return;
+
+  Local<Context> context = isolate->GetCurrentContext();
+  v8::Local<v8::Promise::Resolver> resolver;
+  if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) return;
+  resolver->Resolve(context, Utils::ToLocal(module_obj.ToHandleChecked()));
+
+  v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+  return_value.Set(resolver->GetPromise());
+}
+
+void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  v8::Isolate* isolate = args.GetIsolate();
+  HandleScope scope(isolate);
+  ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
+                       "WebAssembly.Module()");
+
+  if (args.Length() < 1) {
+    thrower.Error("Argument 0 must be a buffer source");
+    return;
+  }
+  i::MaybeHandle<i::JSObject> module_obj =
+      CreateModuleObject(isolate, args[0], &thrower);
+  if (module_obj.is_null()) return;
+
+  v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+  return_value.Set(Utils::ToLocal(module_obj.ToHandleChecked()));
+}
+
+void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  HandleScope scope(args.GetIsolate());
+  v8::Isolate* isolate = args.GetIsolate();
+  ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
+                       "WebAssembly.Instance()");
+
+  if (args.Length() < 1) {
+    thrower.Error("Argument 0 must be a WebAssembly.Module");
+    return;
+  }
+  Local<Context> context = isolate->GetCurrentContext();
+  i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
+  i::Handle<i::Symbol> module_sym(i_context->wasm_module_sym());
+  i::MaybeHandle<i::Object> source =
+      i::Object::GetProperty(Utils::OpenHandle(*args[0]), module_sym);
+  if (source.is_null()) return;
+
+  RawBuffer buffer =
+      GetRawBufferSource(Utils::ToLocal(source.ToHandleChecked()), &thrower);
+  if (buffer.start == nullptr) return;
+
+  InstantiateModuleCommon(args, buffer.start, buffer.end, &thrower);
 }
 }  // namespace
 
@@ -257,8 +392,8 @@
   return isolate->factory()->NewStringFromAsciiChecked(str);
 }
 
-static void InstallFunc(Isolate* isolate, Handle<JSObject> object,
-                        const char* str, FunctionCallback func) {
+static Handle<JSFunction> InstallFunc(Isolate* isolate, Handle<JSObject> object,
+                                      const char* str, FunctionCallback func) {
   Handle<String> name = v8_str(isolate, str);
   Handle<FunctionTemplateInfo> temp = NewTemplate(isolate, func);
   Handle<JSFunction> function =
@@ -266,16 +401,54 @@
   PropertyAttributes attributes =
       static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
   JSObject::AddProperty(object, name, function, attributes);
+  return function;
 }
 
 void WasmJs::Install(Isolate* isolate, Handle<JSGlobalObject> global) {
+  Factory* factory = isolate->factory();
+
   // Setup wasm function map.
   Handle<Context> context(global->native_context(), isolate);
   InstallWasmFunctionMap(isolate, context);
 
-  // Bind the WASM object.
-  Factory* factory = isolate->factory();
-  Handle<String> name = v8_str(isolate, "Wasm");
+  // Bind the experimental WASM object.
+  // TODO(rossberg, titzer): remove once it's no longer needed.
+  {
+    Handle<String> name = v8_str(isolate, "Wasm");
+    Handle<JSFunction> cons = factory->NewFunction(name);
+    JSFunction::SetInstancePrototype(
+        cons, Handle<Object>(context->initial_object_prototype(), isolate));
+    cons->shared()->set_instance_class_name(*name);
+    Handle<JSObject> wasm_object = factory->NewJSObject(cons, TENURED);
+    PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
+    JSObject::AddProperty(global, name, wasm_object, attributes);
+
+    // Install functions on the WASM object.
+    InstallFunc(isolate, wasm_object, "verifyModule", VerifyModule);
+    InstallFunc(isolate, wasm_object, "verifyFunction", VerifyFunction);
+    InstallFunc(isolate, wasm_object, "instantiateModule", InstantiateModule);
+    InstallFunc(isolate, wasm_object, "instantiateModuleFromAsm",
+                InstantiateModuleFromAsm);
+
+    {
+      // Add the Wasm.experimentalVersion property.
+      Handle<String> name = v8_str(isolate, "experimentalVersion");
+      PropertyAttributes attributes =
+          static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
+      Handle<Smi> value =
+          Handle<Smi>(Smi::FromInt(wasm::kWasmVersion), isolate);
+      JSObject::AddProperty(wasm_object, name, value, attributes);
+    }
+  }
+
+  // Create private symbols.
+  Handle<Symbol> module_sym = isolate->factory()->NewPrivateSymbol();
+  Handle<Symbol> instance_sym = isolate->factory()->NewPrivateSymbol();
+  context->set_wasm_module_sym(*module_sym);
+  context->set_wasm_instance_sym(*instance_sym);
+
+  // Bind the WebAssembly object.
+  Handle<String> name = v8_str(isolate, "WebAssembly");
   Handle<JSFunction> cons = factory->NewFunction(name);
   JSFunction::SetInstancePrototype(
       cons, Handle<Object>(context->initial_object_prototype(), isolate));
@@ -284,21 +457,14 @@
   PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
   JSObject::AddProperty(global, name, wasm_object, attributes);
 
-  // Install functions on the WASM object.
-  InstallFunc(isolate, wasm_object, "verifyModule", VerifyModule);
-  InstallFunc(isolate, wasm_object, "verifyFunction", VerifyFunction);
-  InstallFunc(isolate, wasm_object, "instantiateModule", InstantiateModule);
-  InstallFunc(isolate, wasm_object, "instantiateModuleFromAsm",
-              InstantiateModuleFromAsm);
-
-  {
-    // Add the Wasm.experimentalVersion property.
-    Handle<String> name = v8_str(isolate, "experimentalVersion");
-    PropertyAttributes attributes =
-        static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
-    Handle<Smi> value = Handle<Smi>(Smi::FromInt(wasm::kWasmVersion), isolate);
-    JSObject::AddProperty(wasm_object, name, value, attributes);
-  }
+  // Install static methods on WebAssembly object.
+  InstallFunc(isolate, wasm_object, "compile", WebAssemblyCompile);
+  Handle<JSFunction> module_constructor =
+      InstallFunc(isolate, wasm_object, "Module", WebAssemblyModule);
+  Handle<JSFunction> instance_constructor =
+      InstallFunc(isolate, wasm_object, "Instance", WebAssemblyInstance);
+  context->set_wasm_module_constructor(*module_constructor);
+  context->set_wasm_instance_constructor(*instance_constructor);
 }
 
 void WasmJs::InstallWasmFunctionMap(Isolate* isolate, Handle<Context> context) {
diff --git a/src/wasm/wasm-js.h b/src/wasm/wasm-js.h
index e7305aa..ded9a1a 100644
--- a/src/wasm/wasm-js.h
+++ b/src/wasm/wasm-js.h
@@ -7,7 +7,7 @@
 
 #ifndef V8_SHARED
 #include "src/allocation.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
 #else
 #include "include/v8.h"
 #include "src/base/compiler-specific.h"
diff --git a/src/wasm/wasm-macro-gen.h b/src/wasm/wasm-macro-gen.h
index 83ac86a..d08d709 100644
--- a/src/wasm/wasm-macro-gen.h
+++ b/src/wasm/wasm-macro-gen.h
@@ -140,9 +140,9 @@
 
   // Prepend local declarations by creating a new buffer and copying data
   // over. The new buffer must be delete[]'d by the caller.
-  void Prepend(const byte** start, const byte** end) const {
+  void Prepend(Zone* zone, const byte** start, const byte** end) const {
     size_t size = (*end - *start);
-    byte* buffer = new byte[Size() + size];
+    byte* buffer = reinterpret_cast<byte*>(zone->New(Size() + size));
     size_t pos = Emit(buffer);
     memcpy(buffer + pos, *start, size);
     pos += size;
@@ -153,7 +153,7 @@
   size_t Emit(byte* buffer) const {
     size_t pos = 0;
     pos = WriteUint32v(buffer, pos, static_cast<uint32_t>(local_decls.size()));
-    for (size_t i = 0; i < local_decls.size(); i++) {
+    for (size_t i = 0; i < local_decls.size(); ++i) {
       pos = WriteUint32v(buffer, pos, local_decls[i].first);
       buffer[pos++] = WasmOpcodes::LocalTypeCodeFor(local_decls[i].second);
     }
@@ -364,6 +364,15 @@
       static_cast<byte>(                                                   \
           v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
       ZERO_ALIGNMENT, static_cast<byte>(offset)
+#define WASM_LOAD_MEM_ALIGNMENT(type, index, alignment)                        \
+  index, static_cast<byte>(                                                    \
+             v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
+      alignment, ZERO_OFFSET
+#define WASM_STORE_MEM_ALIGNMENT(type, index, alignment, val)              \
+  index, val,                                                              \
+      static_cast<byte>(                                                   \
+          v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
+      alignment, ZERO_OFFSET
 
 #define WASM_CALL_FUNCTION0(index) \
   kExprCallFunction, 0, static_cast<byte>(index)
diff --git a/src/wasm/wasm-module.cc b/src/wasm/wasm-module.cc
index c9a4279..ca0a9b9 100644
--- a/src/wasm/wasm-module.cc
+++ b/src/wasm/wasm-module.cc
@@ -12,6 +12,7 @@
 
 #include "src/wasm/ast-decoder.h"
 #include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-debug.h"
 #include "src/wasm/wasm-function-name-table.h"
 #include "src/wasm/wasm-module.h"
 #include "src/wasm/wasm-result.h"
@@ -22,6 +23,8 @@
 namespace internal {
 namespace wasm {
 
+static const int kPlaceholderMarker = 1000000000;
+
 static const char* wasmSections[] = {
 #define F(enumerator, order, string) string,
     FOR_EACH_WASM_SECTION_TYPE(F)
@@ -109,113 +112,24 @@
   return os;
 }
 
-// A helper class for compiling multiple wasm functions that offers
-// placeholder code objects for calling functions that are not yet compiled.
-class WasmLinker {
- public:
-  WasmLinker(Isolate* isolate, size_t size)
-      : isolate_(isolate), placeholder_code_(size), function_code_(size) {}
-
-  // Get the code object for a function, allocating a placeholder if it has
-  // not yet been compiled.
-  Handle<Code> GetFunctionCode(uint32_t index) {
-    DCHECK(index < function_code_.size());
-    if (function_code_[index].is_null()) {
-      // Create a placeholder code object and encode the corresponding index in
-      // the {constant_pool_offset} field of the code object.
-      // TODO(titzer): placeholder code objects are somewhat dangerous.
-      byte buffer[] = {0, 0, 0, 0, 0, 0, 0, 0};  // fake instructions.
-      CodeDesc desc = {buffer, 8, 8, 0, 0, nullptr};
-      Handle<Code> code = isolate_->factory()->NewCode(
-          desc, Code::KindField::encode(Code::WASM_FUNCTION),
-          Handle<Object>::null());
-      code->set_constant_pool_offset(index + kPlaceholderMarker);
-      placeholder_code_[index] = code;
-      function_code_[index] = code;
-    }
-    return function_code_[index];
-  }
-
-  void Finish(uint32_t index, Handle<Code> code) {
-    DCHECK(index < function_code_.size());
-    function_code_[index] = code;
-  }
-
-  void Link(Handle<FixedArray> function_table,
-            std::vector<uint16_t>& functions) {
-    for (size_t i = 0; i < function_code_.size(); i++) {
-      LinkFunction(function_code_[i]);
-    }
-    if (!function_table.is_null()) {
-      int table_size = static_cast<int>(functions.size());
-      DCHECK_EQ(function_table->length(), table_size * 2);
-      for (int i = 0; i < table_size; i++) {
-        function_table->set(i + table_size, *function_code_[functions[i]]);
-      }
-    }
-  }
-
- private:
-  static const int kPlaceholderMarker = 1000000000;
-
-  Isolate* isolate_;
-  std::vector<Handle<Code>> placeholder_code_;
-  std::vector<Handle<Code>> function_code_;
-
-  void LinkFunction(Handle<Code> code) {
-    bool modified = false;
-    int mode_mask = RelocInfo::kCodeTargetMask;
-    AllowDeferredHandleDereference embedding_raw_address;
-    for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
-      RelocInfo::Mode mode = it.rinfo()->rmode();
-      if (RelocInfo::IsCodeTarget(mode)) {
-        Code* target =
-            Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
-        if (target->kind() == Code::WASM_FUNCTION &&
-            target->constant_pool_offset() >= kPlaceholderMarker) {
-          // Patch direct calls to placeholder code objects.
-          uint32_t index = target->constant_pool_offset() - kPlaceholderMarker;
-          CHECK(index < function_code_.size());
-          Handle<Code> new_target = function_code_[index];
-          if (target != *new_target) {
-            CHECK_EQ(*placeholder_code_[index], target);
-            it.rinfo()->set_target_address(new_target->instruction_start(),
-                                           SKIP_WRITE_BARRIER,
-                                           SKIP_ICACHE_FLUSH);
-            modified = true;
-          }
-        }
-      }
-    }
-    if (modified) {
-      Assembler::FlushICache(isolate_, code->instruction_start(),
-                             code->instruction_size());
-    }
-  }
-};
-
 namespace {
 // Internal constants for the layout of the module object.
-const int kWasmModuleInternalFieldCount = 5;
 const int kWasmModuleFunctionTable = 0;
 const int kWasmModuleCodeTable = 1;
 const int kWasmMemArrayBuffer = 2;
 const int kWasmGlobalsArrayBuffer = 3;
+// TODO(clemensh): Remove function name array, extract names from module bytes.
 const int kWasmFunctionNamesArray = 4;
+const int kWasmModuleBytesString = 5;
+const int kWasmDebugInfo = 6;
+const int kWasmModuleInternalFieldCount = 7;
 
-size_t AllocateGlobalsOffsets(std::vector<WasmGlobal>& globals) {
-  uint32_t offset = 0;
-  if (globals.size() == 0) return 0;
-  for (WasmGlobal& global : globals) {
-    byte size = WasmOpcodes::MemSize(global.type);
-    offset = (offset + size - 1) & ~(size - 1);  // align
-    global.offset = offset;
-    offset += size;
-  }
-  return offset;
+uint32_t GetMinModuleMemSize(const WasmModule* module) {
+  return WasmModule::kPageSize * module->min_mem_pages;
 }
 
-void LoadDataSegments(WasmModule* module, byte* mem_addr, size_t mem_size) {
+void LoadDataSegments(const WasmModule* module, byte* mem_addr,
+                      size_t mem_size) {
   for (const WasmDataSegment& segment : module->data_segments) {
     if (!segment.init) continue;
     if (!segment.source_size) continue;
@@ -228,14 +142,20 @@
   }
 }
 
-Handle<FixedArray> BuildFunctionTable(Isolate* isolate, WasmModule* module) {
-  if (module->function_table.size() == 0) {
+Handle<FixedArray> BuildFunctionTable(Isolate* isolate,
+                                      const WasmModule* module) {
+  // Compute the size of the indirect function table.
+  uint32_t table_size = module->FunctionTableSize();
+  if (table_size == 0) {
     return Handle<FixedArray>::null();
   }
-  int table_size = static_cast<int>(module->function_table.size());
+
   Handle<FixedArray> fixed = isolate->factory()->NewFixedArray(2 * table_size);
-  for (int i = 0; i < table_size; i++) {
-    WasmFunction* function = &module->functions[module->function_table[i]];
+  for (uint32_t i = 0;
+       i < static_cast<uint32_t>(module->function_table.size());
+       ++i) {
+    const WasmFunction* function =
+        &module->functions[module->function_table[i]];
     fixed->set(i, Smi::FromInt(function->sig_index));
   }
   return fixed;
@@ -243,15 +163,13 @@
 
 Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
                                      byte** backing_store) {
+  *backing_store = nullptr;
   if (size > (WasmModule::kMaxMemPages * WasmModule::kPageSize)) {
     // TODO(titzer): lift restriction on maximum memory allocated here.
-    *backing_store = nullptr;
     return Handle<JSArrayBuffer>::null();
   }
-  void* memory =
-      isolate->array_buffer_allocator()->Allocate(static_cast<int>(size));
-  if (!memory) {
-    *backing_store = nullptr;
+  void* memory = isolate->array_buffer_allocator()->Allocate(size);
+  if (memory == nullptr) {
     return Handle<JSArrayBuffer>::null();
   }
 
@@ -260,7 +178,7 @@
 #if DEBUG
   // Double check the API allocator actually zero-initialized the memory.
   byte* bytes = reinterpret_cast<byte*>(*backing_store);
-  for (size_t i = 0; i < size; i++) {
+  for (size_t i = 0; i < size; ++i) {
     DCHECK_EQ(0, bytes[i]);
   }
 #endif
@@ -271,12 +189,27 @@
   return buffer;
 }
 
+void RelocateInstanceCode(WasmModuleInstance* instance) {
+  for (uint32_t i = 0; i < instance->function_code.size(); ++i) {
+    Handle<Code> function = instance->function_code[i];
+    AllowDeferredHandleDereference embedding_raw_address;
+    int mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE) |
+               (1 << RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+    for (RelocIterator it(*function, mask); !it.done(); it.next()) {
+      it.rinfo()->update_wasm_memory_reference(
+          nullptr, instance->mem_start, GetMinModuleMemSize(instance->module),
+          static_cast<uint32_t>(instance->mem_size));
+    }
+  }
+}
+
 // Set the memory for a module instance to be the {memory} array buffer.
 void SetMemory(WasmModuleInstance* instance, Handle<JSArrayBuffer> memory) {
   memory->set_is_neuterable(false);
   instance->mem_start = reinterpret_cast<byte*>(memory->backing_store());
   instance->mem_size = memory->byte_length()->Number();
   instance->mem_buffer = memory;
+  RelocateInstanceCode(instance);
 }
 
 // Allocate memory for a module instance as a new JSArrayBuffer.
@@ -289,50 +222,140 @@
     thrower->Error("Out of memory: wasm memory too large");
     return false;
   }
-  instance->mem_size = WasmModule::kPageSize * instance->module->min_mem_pages;
+  instance->mem_size = GetMinModuleMemSize(instance->module);
   instance->mem_buffer =
       NewArrayBuffer(isolate, instance->mem_size, &instance->mem_start);
-  if (!instance->mem_start) {
+  if (instance->mem_start == nullptr) {
     thrower->Error("Out of memory: wasm memory");
     instance->mem_size = 0;
     return false;
   }
+  RelocateInstanceCode(instance);
   return true;
 }
 
 bool AllocateGlobals(ErrorThrower* thrower, Isolate* isolate,
                      WasmModuleInstance* instance) {
-  instance->globals_size = AllocateGlobalsOffsets(instance->module->globals);
-
-  if (instance->globals_size > 0) {
-    instance->globals_buffer = NewArrayBuffer(isolate, instance->globals_size,
-                                              &instance->globals_start);
+  uint32_t globals_size = instance->module->globals_size;
+  if (globals_size > 0) {
+    instance->globals_buffer =
+        NewArrayBuffer(isolate, globals_size, &instance->globals_start);
     if (!instance->globals_start) {
       // Not enough space for backing store of globals.
       thrower->Error("Out of memory: wasm globals");
       return false;
     }
+
+    for (uint32_t i = 0; i < instance->function_code.size(); ++i) {
+      Handle<Code> function = instance->function_code[i];
+      AllowDeferredHandleDereference embedding_raw_address;
+      int mask = 1 << RelocInfo::WASM_GLOBAL_REFERENCE;
+      for (RelocIterator it(*function, mask); !it.done(); it.next()) {
+        it.rinfo()->update_wasm_global_reference(nullptr,
+                                                 instance->globals_start);
+      }
+    }
   }
   return true;
 }
+
+Handle<Code> CreatePlaceholder(Factory* factory, uint32_t index,
+                               Code::Kind kind) {
+  // Create a placeholder code object and encode the corresponding index in
+  // the {constant_pool_offset} field of the code object.
+  // TODO(titzer): placeholder code objects are somewhat dangerous.
+  static byte buffer[] = {0, 0, 0, 0, 0, 0, 0, 0};  // fake instructions.
+  static CodeDesc desc = {buffer, 8, 8, 0, 0, nullptr, 0, nullptr};
+  Handle<Code> code = factory->NewCode(desc, Code::KindField::encode(kind),
+                                       Handle<Object>::null());
+  code->set_constant_pool_offset(static_cast<int>(index) + kPlaceholderMarker);
+  return code;
+}
+
+// TODO(mtrofin): remove when we stop relying on placeholders.
+void InitializePlaceholders(Factory* factory,
+                            std::vector<Handle<Code>>* placeholders,
+                            size_t size) {
+  DCHECK(placeholders->empty());
+  placeholders->reserve(size);
+
+  for (uint32_t i = 0; i < size; ++i) {
+    placeholders->push_back(CreatePlaceholder(factory, i, Code::WASM_FUNCTION));
+  }
+}
+
+bool LinkFunction(Handle<Code> unlinked,
+                  const std::vector<Handle<Code>>& code_targets,
+                  Code::Kind kind) {
+  bool modified = false;
+  int mode_mask = RelocInfo::kCodeTargetMask;
+  AllowDeferredHandleDereference embedding_raw_address;
+  for (RelocIterator it(*unlinked, mode_mask); !it.done(); it.next()) {
+    RelocInfo::Mode mode = it.rinfo()->rmode();
+    if (RelocInfo::IsCodeTarget(mode)) {
+      Code* target =
+          Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+      if (target->kind() == kind &&
+          target->constant_pool_offset() >= kPlaceholderMarker) {
+        // Patch direct calls to placeholder code objects.
+        uint32_t index = target->constant_pool_offset() - kPlaceholderMarker;
+        CHECK(index < code_targets.size());
+        Handle<Code> new_target = code_targets[index];
+        if (target != *new_target) {
+          it.rinfo()->set_target_address(new_target->instruction_start(),
+                                         SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+          modified = true;
+        }
+      }
+    }
+  }
+  return modified;
+}
+
+void LinkModuleFunctions(Isolate* isolate,
+                         std::vector<Handle<Code>>& functions) {
+  for (size_t i = 0; i < functions.size(); ++i) {
+    Handle<Code> code = functions[i];
+    bool modified = LinkFunction(code, functions, Code::WASM_FUNCTION);
+    if (modified) {
+      Assembler::FlushICache(isolate, code->instruction_start(),
+                             code->instruction_size());
+    }
+  }
+}
+
+void LinkImports(Isolate* isolate, std::vector<Handle<Code>>& functions,
+                 const std::vector<Handle<Code>>& imports) {
+  for (uint32_t i = 0; i < functions.size(); ++i) {
+    Handle<Code> code = functions[i];
+    bool modified = LinkFunction(code, imports, Code::WASM_TO_JS_FUNCTION);
+    if (modified) {
+      Assembler::FlushICache(isolate, code->instruction_start(),
+                             code->instruction_size());
+    }
+  }
+}
+
 }  // namespace
 
 WasmModule::WasmModule()
-    : shared_isolate(nullptr),
-      module_start(nullptr),
+    : module_start(nullptr),
       module_end(nullptr),
       min_mem_pages(0),
       max_mem_pages(0),
       mem_export(false),
       mem_external(false),
       start_function_index(-1),
-      origin(kWasmOrigin) {}
+      origin(kWasmOrigin),
+      globals_size(0),
+      indirect_table_size(0),
+      pending_tasks(new base::Semaphore(0)) {}
 
 static MaybeHandle<JSFunction> ReportFFIError(ErrorThrower& thrower,
                                               const char* error, uint32_t index,
                                               wasm::WasmName module_name,
                                               wasm::WasmName function_name) {
-  if (function_name.start()) {
+  if (!function_name.is_empty()) {
     thrower.Error("Import #%d module=\"%.*s\" function=\"%.*s\" error: %s",
                   index, module_name.length(), module_name.start(),
                   function_name.length(), function_name.start(), error);
@@ -368,7 +391,7 @@
   }
 
   Handle<Object> function;
-  if (function_name.start()) {
+  if (!function_name.is_empty()) {
     // Look up the function in the module.
     Handle<String> name = factory->InternalizeUtf8String(function_name);
     MaybeHandle<Object> result = Object::GetProperty(module, name);
@@ -411,7 +434,7 @@
 
   compiler::WasmCompilationUnit* unit = compilation_units->at(index);
   if (unit != nullptr) {
-    compiler::ExecuteCompilation(unit);
+    unit->ExecuteCompilation();
     {
       base::LockGuard<base::Mutex> guard(result_mutex);
       executed_units->push(unit);
@@ -452,22 +475,31 @@
   base::AtomicNumber<size_t>* next_unit_;
 };
 
-void record_code_size(uint32_t& total_code_size, Code* code) {
-  if (FLAG_print_wasm_code_size) {
-    total_code_size += code->body_size() + code->relocation_info()->length();
-  }
-}
+// Records statistics on the code generated by compiling WASM functions.
+struct CodeStats {
+  size_t code_size;
+  size_t reloc_size;
 
-bool CompileWrappersToImportedFunctions(Isolate* isolate, WasmModule* module,
-                                        const Handle<JSReceiver> ffi,
-                                        WasmModuleInstance* instance,
-                                        ErrorThrower* thrower, Factory* factory,
-                                        ModuleEnv* module_env,
-                                        uint32_t& total_code_size) {
-  uint32_t index = 0;
+  inline CodeStats() : code_size(0), reloc_size(0) {}
+
+  inline void Record(Code* code) {
+    code_size += code->body_size();
+    reloc_size += code->relocation_info()->length();
+  }
+
+  inline void Report() {
+    PrintF("Total generated wasm code: %zu bytes\n", code_size);
+    PrintF("Total generated wasm reloc: %zu bytes\n", reloc_size);
+  }
+};
+
+bool CompileWrappersToImportedFunctions(
+    Isolate* isolate, const WasmModule* module, const Handle<JSReceiver> ffi,
+    WasmModuleInstance* instance, ErrorThrower* thrower, Factory* factory) {
   if (module->import_table.size() > 0) {
     instance->import_code.reserve(module->import_table.size());
-    for (const WasmImport& import : module->import_table) {
+    for (uint32_t index = 0; index < module->import_table.size(); ++index) {
+      const WasmImport& import = module->import_table[index];
       WasmName module_name = module->GetNameOrNull(import.module_name_offset,
                                                    import.module_name_length);
       WasmName function_name = module->GetNameOrNull(
@@ -477,28 +509,20 @@
       if (function.is_null()) return false;
 
       Handle<Code> code = compiler::CompileWasmToJSWrapper(
-          isolate, module_env, function.ToHandleChecked(), import.sig,
-          module_name, function_name);
-      instance->import_code.push_back(code);
-      record_code_size(total_code_size, *code);
-      index++;
+          isolate, function.ToHandleChecked(), import.sig, module_name,
+          function_name);
+      instance->import_code[index] = code;
     }
   }
   return true;
 }
 
 void InitializeParallelCompilation(
-    Isolate* isolate, std::vector<WasmFunction>& functions,
+    Isolate* isolate, const std::vector<WasmFunction>& functions,
     std::vector<compiler::WasmCompilationUnit*>& compilation_units,
     ModuleEnv& module_env, ErrorThrower& thrower) {
-  // Create a placeholder code object for all functions.
-  // TODO(ahaas): Maybe we could skip this for external functions.
-  for (uint32_t i = 0; i < functions.size(); i++) {
-    module_env.linker->GetFunctionCode(i);
-  }
-
-  for (uint32_t i = FLAG_skip_compiling_wasm_funcs; i < functions.size(); i++) {
-    compilation_units[i] = compiler::CreateWasmCompilationUnit(
+  for (uint32_t i = FLAG_skip_compiling_wasm_funcs; i < functions.size(); ++i) {
+    compilation_units[i] = new compiler::WasmCompilationUnit(
         &thrower, isolate, &module_env, &functions[i], i);
   }
 }
@@ -507,16 +531,16 @@
     Isolate* isolate,
     std::vector<compiler::WasmCompilationUnit*>& compilation_units,
     std::queue<compiler::WasmCompilationUnit*>& executed_units,
-    const base::SmartPointer<base::Semaphore>& pending_tasks,
-    base::Mutex& result_mutex, base::AtomicNumber<size_t>& next_unit) {
+    base::Semaphore* pending_tasks, base::Mutex& result_mutex,
+    base::AtomicNumber<size_t>& next_unit) {
   const size_t num_tasks =
       Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
           V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
   uint32_t* task_ids = new uint32_t[num_tasks];
-  for (size_t i = 0; i < num_tasks; i++) {
+  for (size_t i = 0; i < num_tasks; ++i) {
     WasmCompilationTask* task =
         new WasmCompilationTask(isolate, &compilation_units, &executed_units,
-                                pending_tasks.get(), &result_mutex, &next_unit);
+                                pending_tasks, &result_mutex, &next_unit);
     task_ids[i] = task->id();
     V8::GetCurrentPlatform()->CallOnBackgroundThread(
         task, v8::Platform::kShortRunningTask);
@@ -524,13 +548,12 @@
   return task_ids;
 }
 
-void WaitForCompilationTasks(
-    Isolate* isolate, uint32_t* task_ids,
-    const base::SmartPointer<base::Semaphore>& pending_tasks) {
+void WaitForCompilationTasks(Isolate* isolate, uint32_t* task_ids,
+                             base::Semaphore* pending_tasks) {
   const size_t num_tasks =
       Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
           V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
-  for (size_t i = 0; i < num_tasks; i++) {
+  for (size_t i = 0; i < num_tasks; ++i) {
     // If the task has not started yet, then we abort it. Otherwise we wait for
     // it to finish.
     if (!isolate->cancelable_task_manager()->TryAbort(task_ids[i])) {
@@ -540,7 +563,6 @@
 }
 
 void FinishCompilationUnits(
-    WasmModule* module,
     std::queue<compiler::WasmCompilationUnit*>& executed_units,
     std::vector<Handle<Code>>& results, base::Mutex& result_mutex) {
   while (true) {
@@ -553,90 +575,201 @@
       unit = executed_units.front();
       executed_units.pop();
     }
-    int j = compiler::GetIndexOfWasmCompilationUnit(unit);
-    results[j] = compiler::FinishCompilation(unit);
+    int j = unit->index();
+    results[j] = unit->FinishCompilation();
+    delete unit;
   }
 }
 
-bool FinishCompilation(Isolate* isolate, WasmModule* module,
-                       const Handle<JSReceiver> ffi,
-                       const std::vector<Handle<Code>>& results,
-                       const WasmModuleInstance& instance,
-                       const Handle<FixedArray>& code_table,
-                       ErrorThrower& thrower, Factory* factory,
-                       ModuleEnv& module_env, uint32_t& total_code_size,
-                       PropertyDescriptor& desc) {
+void CompileInParallel(Isolate* isolate, const WasmModule* module,
+                       std::vector<Handle<Code>>& functions,
+                       ErrorThrower* thrower, ModuleEnv* module_env) {
+  // Data structures for the parallel compilation.
+  std::vector<compiler::WasmCompilationUnit*> compilation_units(
+      module->functions.size());
+  std::queue<compiler::WasmCompilationUnit*> executed_units;
+
+  //-----------------------------------------------------------------------
+  // For parallel compilation:
+  // 1) The main thread allocates a compilation unit for each wasm function
+  //    and stores them in the vector {compilation_units}.
+  // 2) The main thread spawns {WasmCompilationTask} instances which run on
+  //    the background threads.
+  // 3.a) The background threads and the main thread pick one compilation
+  //      unit at a time and execute the parallel phase of the compilation
+  //      unit. After finishing the execution of the parallel phase, the
+  //      result is enqueued in {executed_units}.
+  // 3.b) If {executed_units} contains a compilation unit, the main thread
+  //      dequeues it and finishes the compilation.
+  // 4) After the parallel phase of all compilation units has started, the
+  //    main thread waits for all {WasmCompilationTask} instances to finish.
+  // 5) The main thread finishes the compilation.
+
+  // Turn on the {CanonicalHandleScope} so that the background threads can
+  // use the node cache.
+  CanonicalHandleScope canonical(isolate);
+
+  // 1) The main thread allocates a compilation unit for each wasm function
+  //    and stores them in the vector {compilation_units}.
+  InitializeParallelCompilation(isolate, module->functions, compilation_units,
+                                *module_env, *thrower);
+
+  // Objects for the synchronization with the background threads.
+  base::Mutex result_mutex;
+  base::AtomicNumber<size_t> next_unit(
+      static_cast<size_t>(FLAG_skip_compiling_wasm_funcs));
+
+  // 2) The main thread spawns {WasmCompilationTask} instances which run on
+  //    the background threads.
+  base::SmartArrayPointer<uint32_t> task_ids(StartCompilationTasks(
+      isolate, compilation_units, executed_units, module->pending_tasks.get(),
+      result_mutex, next_unit));
+
+  // 3.a) The background threads and the main thread pick one compilation
+  //      unit at a time and execute the parallel phase of the compilation
+  //      unit. After finishing the execution of the parallel phase, the
+  //      result is enqueued in {executed_units}.
+  while (FetchAndExecuteCompilationUnit(isolate, &compilation_units,
+                                        &executed_units, &result_mutex,
+                                        &next_unit)) {
+    // 3.b) If {executed_units} contains a compilation unit, the main thread
+    //      dequeues it and finishes the compilation unit. Compilation units
+    //      are finished concurrently to the background threads to save
+    //      memory.
+    FinishCompilationUnits(executed_units, functions, result_mutex);
+  }
+  // 4) After the parallel phase of all compilation units has started, the
+  //    main thread waits for all {WasmCompilationTask} instances to finish.
+  WaitForCompilationTasks(isolate, task_ids.get(), module->pending_tasks.get());
+  // Finish the compilation of the remaining compilation units.
+  FinishCompilationUnits(executed_units, functions, result_mutex);
+}
+
+void CompileSequentially(Isolate* isolate, const WasmModule* module,
+                         std::vector<Handle<Code>>& functions,
+                         ErrorThrower* thrower, ModuleEnv* module_env) {
+  DCHECK(!thrower->error());
+
   for (uint32_t i = FLAG_skip_compiling_wasm_funcs;
-       i < module->functions.size(); i++) {
+       i < module->functions.size(); ++i) {
     const WasmFunction& func = module->functions[i];
-    if (thrower.error()) break;
 
     DCHECK_EQ(i, func.func_index);
     WasmName str = module->GetName(func.name_offset, func.name_length);
     Handle<Code> code = Handle<Code>::null();
-    Handle<JSFunction> function = Handle<JSFunction>::null();
-    Handle<String> function_name = Handle<String>::null();
-    if (FLAG_wasm_num_compilation_tasks != 0) {
-      code = results[i];
-    } else {
-      // Compile the function.
-      code =
-          compiler::CompileWasmFunction(&thrower, isolate, &module_env, &func);
-    }
+    // Compile the function.
+    code = compiler::WasmCompilationUnit::CompileWasmFunction(
+        thrower, isolate, module_env, &func);
     if (code.is_null()) {
-      thrower.Error("Compilation of #%d:%.*s failed.", i, str.length(),
-                    str.start());
-      return false;
+      thrower->Error("Compilation of #%d:%.*s failed.", i, str.length(),
+                     str.start());
+      break;
     }
-    if (func.exported) {
-      function_name = factory->InternalizeUtf8String(str);
-      function = compiler::CompileJSToWasmWrapper(
-          isolate, &module_env, function_name, code, instance.js_object, i);
-      record_code_size(total_code_size, function->code());
-    }
-    if (!code.is_null()) {
       // Install the code into the linker table.
-      module_env.linker->Finish(i, code);
-      code_table->set(i, *code);
-      record_code_size(total_code_size, *code);
-    }
-    if (func.exported) {
-      // Exported functions are installed as read-only properties on the
-      // module.
-      desc.set_value(function);
-      Maybe<bool> status = JSReceiver::DefineOwnProperty(
-          isolate, instance.js_object, function_name, &desc,
-          Object::THROW_ON_ERROR);
-      if (!status.IsJust())
-        thrower.Error("export of %.*s failed.", str.length(), str.start());
+    functions[i] = code;
+  }
+}
+
+void PopulateFunctionTable(WasmModuleInstance* instance) {
+  if (!instance->function_table.is_null()) {
+    uint32_t table_size = instance->module->FunctionTableSize();
+    DCHECK_EQ(table_size * 2, instance->function_table->length());
+    uint32_t populated_table_size =
+        static_cast<uint32_t>(instance->module->function_table.size());
+    for (uint32_t i = 0; i < populated_table_size; ++i) {
+      instance->function_table->set(
+          i + table_size,
+          *instance->function_code[instance->module->function_table[i]]);
     }
   }
-  return true;
 }
 }  // namespace
 
+void SetDeoptimizationData(Factory* factory, Handle<JSObject> js_object,
+                           std::vector<Handle<Code>>& functions) {
+  for (size_t i = FLAG_skip_compiling_wasm_funcs; i < functions.size(); ++i) {
+    Handle<Code> code = functions[i];
+    DCHECK(code->deoptimization_data() == nullptr ||
+           code->deoptimization_data()->length() == 0);
+    Handle<FixedArray> deopt_data = factory->NewFixedArray(2, TENURED);
+    if (!js_object.is_null()) {
+      deopt_data->set(0, *js_object);
+    }
+    deopt_data->set(1, Smi::FromInt(static_cast<int>(i)));
+    deopt_data->set_length(2);
+    code->set_deoptimization_data(*deopt_data);
+  }
+}
+
+Handle<FixedArray> WasmModule::CompileFunctions(Isolate* isolate) const {
+  Factory* factory = isolate->factory();
+  ErrorThrower thrower(isolate, "WasmModule::CompileFunctions()");
+
+  WasmModuleInstance temp_instance_for_compilation(this);
+  temp_instance_for_compilation.function_table =
+      BuildFunctionTable(isolate, this);
+  temp_instance_for_compilation.context = isolate->native_context();
+  temp_instance_for_compilation.mem_size = GetMinModuleMemSize(this);
+  temp_instance_for_compilation.mem_start = nullptr;
+  temp_instance_for_compilation.globals_start = nullptr;
+
+  ModuleEnv module_env;
+  module_env.module = this;
+  module_env.instance = &temp_instance_for_compilation;
+  module_env.origin = origin;
+  InitializePlaceholders(factory, &module_env.placeholders, functions.size());
+
+  Handle<FixedArray> ret =
+      factory->NewFixedArray(static_cast<int>(functions.size()), TENURED);
+
+  temp_instance_for_compilation.import_code.resize(import_table.size());
+  for (uint32_t i = 0; i < import_table.size(); ++i) {
+    temp_instance_for_compilation.import_code[i] =
+        CreatePlaceholder(factory, i, Code::WASM_TO_JS_FUNCTION);
+  }
+  isolate->counters()->wasm_functions_per_module()->AddSample(
+      static_cast<int>(functions.size()));
+  if (FLAG_wasm_num_compilation_tasks != 0) {
+    CompileInParallel(isolate, this,
+                      temp_instance_for_compilation.function_code, &thrower,
+                      &module_env);
+  } else {
+    CompileSequentially(isolate, this,
+                        temp_instance_for_compilation.function_code, &thrower,
+                        &module_env);
+  }
+  if (thrower.error()) {
+    return Handle<FixedArray>::null();
+  }
+
+  LinkModuleFunctions(isolate, temp_instance_for_compilation.function_code);
+
+  // At this point, compilation has completed. Update the code table
+  // and record sizes.
+  for (size_t i = FLAG_skip_compiling_wasm_funcs;
+       i < temp_instance_for_compilation.function_code.size(); ++i) {
+    Code* code = *temp_instance_for_compilation.function_code[i];
+    ret->set(static_cast<int>(i), code);
+  }
+
+  PopulateFunctionTable(&temp_instance_for_compilation);
+
+  return ret;
+}
+
 // Instantiates a wasm module as a JSObject.
 //  * allocates a backing store of {mem_size} bytes.
 //  * installs a named property "memory" for that buffer if exported
 //  * installs named properties on the object for exported functions
 //  * compiles wasm code to machine code
-MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
-                                              Handle<JSReceiver> ffi,
-                                              Handle<JSArrayBuffer> memory) {
+MaybeHandle<JSObject> WasmModule::Instantiate(
+    Isolate* isolate, Handle<JSReceiver> ffi,
+    Handle<JSArrayBuffer> memory) const {
   HistogramTimerScope wasm_instantiate_module_time_scope(
       isolate->counters()->wasm_instantiate_module_time());
-  this->shared_isolate = isolate;  // TODO(titzer): have a real shared isolate.
   ErrorThrower thrower(isolate, "WasmModule::Instantiate()");
   Factory* factory = isolate->factory();
 
-  PropertyDescriptor desc;
-  desc.set_writable(false);
-
-  // If FLAG_print_wasm_code_size is set, this aggregates the sum of all code
-  // objects created for this module.
-  // TODO(titzer): switch this to TRACE_EVENT
-  uint32_t total_code_size = 0;
-
   //-------------------------------------------------------------------------
   // Allocate the instance and its JS counterpart.
   //-------------------------------------------------------------------------
@@ -646,9 +779,26 @@
   WasmModuleInstance instance(this);
   instance.context = isolate->native_context();
   instance.js_object = factory->NewJSObjectFromMap(map, TENURED);
-  Handle<FixedArray> code_table =
-      factory->NewFixedArray(static_cast<int>(functions.size()), TENURED);
+
+  Handle<FixedArray> code_table = CompileFunctions(isolate);
+  if (code_table.is_null()) return Handle<JSObject>::null();
+
   instance.js_object->SetInternalField(kWasmModuleCodeTable, *code_table);
+  size_t module_bytes_len =
+      instance.module->module_end - instance.module->module_start;
+  DCHECK_LE(module_bytes_len, static_cast<size_t>(kMaxInt));
+  Vector<const uint8_t> module_bytes_vec(instance.module->module_start,
+                                         static_cast<int>(module_bytes_len));
+  Handle<String> module_bytes_string =
+      factory->NewStringFromOneByte(module_bytes_vec, TENURED)
+          .ToHandleChecked();
+  instance.js_object->SetInternalField(kWasmModuleBytesString,
+                                       *module_bytes_string);
+
+  for (uint32_t i = 0; i < functions.size(); ++i) {
+    Handle<Code> code = Handle<Code>(Code::cast(code_table->get(i)));
+    instance.function_code[i] = code;
+  }
 
   //-------------------------------------------------------------------------
   // Allocate and initialize the linear memory.
@@ -682,132 +832,75 @@
   HistogramTimerScope wasm_compile_module_time_scope(
       isolate->counters()->wasm_compile_module_time());
 
-  instance.function_table = BuildFunctionTable(isolate, this);
-  WasmLinker linker(isolate, functions.size());
   ModuleEnv module_env;
   module_env.module = this;
   module_env.instance = &instance;
-  module_env.linker = &linker;
   module_env.origin = origin;
 
   //-------------------------------------------------------------------------
   // Compile wrappers to imported functions.
   //-------------------------------------------------------------------------
   if (!CompileWrappersToImportedFunctions(isolate, this, ffi, &instance,
-                                          &thrower, factory, &module_env,
-                                          total_code_size)) {
+                                          &thrower, factory)) {
     return MaybeHandle<JSObject>();
   }
-  //-------------------------------------------------------------------------
-  // Compile all functions in the module.
-  //-------------------------------------------------------------------------
+
+  // If FLAG_print_wasm_code_size is set, this aggregates the sum of all code
+  // objects created for this module.
+  // TODO(titzer): switch this to TRACE_EVENT
+  CodeStats code_stats;
+  if (FLAG_print_wasm_code_size) {
+    for (Handle<Code> c : instance.function_code) code_stats.Record(*c);
+    for (Handle<Code> c : instance.import_code) code_stats.Record(*c);
+  }
+
   {
-    isolate->counters()->wasm_functions_per_module()->AddSample(
-        static_cast<int>(functions.size()));
-
-    // Data structures for the parallel compilation.
-    std::vector<compiler::WasmCompilationUnit*> compilation_units(
-        functions.size());
-    std::queue<compiler::WasmCompilationUnit*> executed_units;
-    std::vector<Handle<Code>> results(functions.size());
-
-    if (FLAG_wasm_num_compilation_tasks != 0) {
-      //-----------------------------------------------------------------------
-      // For parallel compilation:
-      // 1) The main thread allocates a compilation unit for each wasm function
-      //    and stores them in the vector {compilation_units}.
-      // 2) The main thread spawns {WasmCompilationTask} instances which run on
-      //    the background threads.
-      // 3.a) The background threads and the main thread pick one compilation
-      //      unit at a time and execute the parallel phase of the compilation
-      //      unit. After finishing the execution of the parallel phase, the
-      //      result is enqueued in {executed_units}.
-      // 3.b) If {executed_units} contains a compilation unit, the main thread
-      //      dequeues it and finishes the compilation.
-      // 4) After the parallel phase of all compilation units has started, the
-      //    main thread waits for all {WasmCompilationTask} instances to finish.
-      // 5) The main thread finishes the compilation.
-
-      // Turn on the {CanonicalHandleScope} so that the background threads can
-      // use the node cache.
-      CanonicalHandleScope canonical(isolate);
-
-      // 1) The main thread allocates a compilation unit for each wasm function
-      //    and stores them in the vector {compilation_units}.
-      InitializeParallelCompilation(isolate, functions, compilation_units,
-                                    module_env, thrower);
-
-      // Objects for the synchronization with the background threads.
-      base::SmartPointer<base::Semaphore> pending_tasks(new base::Semaphore(0));
-      base::Mutex result_mutex;
-      base::AtomicNumber<size_t> next_unit(
-          static_cast<size_t>(FLAG_skip_compiling_wasm_funcs));
-
-      // 2) The main thread spawns {WasmCompilationTask} instances which run on
-      //    the background threads.
-      base::SmartArrayPointer<uint32_t> task_ids(
-          StartCompilationTasks(isolate, compilation_units, executed_units,
-                                pending_tasks, result_mutex, next_unit));
-
-      // 3.a) The background threads and the main thread pick one compilation
-      //      unit at a time and execute the parallel phase of the compilation
-      //      unit. After finishing the execution of the parallel phase, the
-      //      result is enqueued in {executed_units}.
-      while (FetchAndExecuteCompilationUnit(isolate, &compilation_units,
-                                            &executed_units, &result_mutex,
-                                            &next_unit)) {
-        // 3.b) If {executed_units} contains a compilation unit, the main thread
-        //      dequeues it and finishes the compilation unit. Compilation units
-        //      are finished concurrently to the background threads to save
-        //      memory.
-        FinishCompilationUnits(this, executed_units, results, result_mutex);
-      }
-      // 4) After the parallel phase of all compilation units has started, the
-      //    main thread waits for all {WasmCompilationTask} instances to finish.
-      WaitForCompilationTasks(isolate, task_ids.get(), pending_tasks);
-      // Finish the compilation of the remaining compilation units.
-      FinishCompilationUnits(this, executed_units, results, result_mutex);
-    }
-    // 5) The main thread finishes the compilation.
-    if (!FinishCompilation(isolate, this, ffi, results, instance, code_table,
-                           thrower, factory, module_env, total_code_size,
-                           desc)) {
-      return MaybeHandle<JSObject>();
-    }
-
-    // Patch all direct call sites.
-    linker.Link(instance.function_table, this->function_table);
     instance.js_object->SetInternalField(kWasmModuleFunctionTable,
                                          Smi::FromInt(0));
+    LinkImports(isolate, instance.function_code, instance.import_code);
+
+    SetDeoptimizationData(factory, instance.js_object, instance.function_code);
 
     //-------------------------------------------------------------------------
     // Create and populate the exports object.
     //-------------------------------------------------------------------------
     if (export_table.size() > 0 || mem_export) {
-      // Create the "exports" object.
-      Handle<JSFunction> object_function = Handle<JSFunction>(
-          isolate->native_context()->object_function(), isolate);
-      Handle<JSObject> exports_object =
-          factory->NewJSObject(object_function, TENURED);
-      Handle<String> exports_name = factory->InternalizeUtf8String("exports");
-      JSObject::AddProperty(instance.js_object, exports_name, exports_object,
-                            READ_ONLY);
+      Handle<JSObject> exports_object;
+      if (origin == kWasmOrigin) {
+        // Create the "exports" object.
+        Handle<JSFunction> object_function = Handle<JSFunction>(
+            isolate->native_context()->object_function(), isolate);
+        exports_object = factory->NewJSObject(object_function, TENURED);
+        Handle<String> exports_name = factory->InternalizeUtf8String("exports");
+        JSObject::AddProperty(instance.js_object, exports_name, exports_object,
+                              READ_ONLY);
+      } else {
+        // Just export the functions directly on the object returned.
+        exports_object = instance.js_object;
+      }
+
+      PropertyDescriptor desc;
+      desc.set_writable(false);
 
       // Compile wrappers and add them to the exports object.
       for (const WasmExport& exp : export_table) {
         if (thrower.error()) break;
         WasmName str = GetName(exp.name_offset, exp.name_length);
         Handle<String> name = factory->InternalizeUtf8String(str);
-        Handle<Code> code = linker.GetFunctionCode(exp.func_index);
+        Handle<Code> code = instance.function_code[exp.func_index];
         Handle<JSFunction> function = compiler::CompileJSToWasmWrapper(
             isolate, &module_env, name, code, instance.js_object,
             exp.func_index);
-        record_code_size(total_code_size, function->code());
+        if (FLAG_print_wasm_code_size) {
+          code_stats.Record(function->code());
+        }
         desc.set_value(function);
         Maybe<bool> status = JSReceiver::DefineOwnProperty(
             isolate, exports_object, name, &desc, Object::THROW_ON_ERROR);
-        if (!status.IsJust())
+        if (!status.IsJust()) {
           thrower.Error("export of %.*s failed.", str.length(), str.start());
+          break;
+        }
       }
 
       if (mem_export) {
@@ -819,29 +912,28 @@
     }
   }
 
-  //-------------------------------------------------------------------------
-  // Attach an array with function names and an array with offsets into that
-  // first array.
-  //-------------------------------------------------------------------------
-  {
-    Handle<Object> arr = BuildFunctionNamesTable(isolate, module_env.module);
-    instance.js_object->SetInternalField(kWasmFunctionNamesArray, *arr);
+  if (FLAG_print_wasm_code_size) {
+    code_stats.Report();
   }
-
-  if (FLAG_print_wasm_code_size)
-    printf("Total generated wasm code: %u bytes\n", total_code_size);
+  //-------------------------------------------------------------------------
+  // Attach the function name table.
+  //-------------------------------------------------------------------------
+  Handle<ByteArray> function_name_table =
+      BuildFunctionNamesTable(isolate, module_env.module);
+  instance.js_object->SetInternalField(kWasmFunctionNamesArray,
+                                       *function_name_table);
 
   // Run the start function if one was specified.
   if (this->start_function_index >= 0) {
     HandleScope scope(isolate);
     uint32_t index = static_cast<uint32_t>(this->start_function_index);
     Handle<String> name = isolate->factory()->NewStringFromStaticChars("start");
-    Handle<Code> code = linker.GetFunctionCode(index);
+    Handle<Code> code = instance.function_code[index];
     Handle<JSFunction> jsfunc = compiler::CompileJSToWasmWrapper(
         isolate, &module_env, name, code, instance.js_object, index);
 
     // Call the JS function.
-    Handle<Object> undefined(isolate->heap()->undefined_value(), isolate);
+    Handle<Object> undefined = isolate->factory()->undefined_value();
     MaybeHandle<Object> retval =
         Execution::Call(isolate, jsfunc, undefined, 0, nullptr);
 
@@ -852,10 +944,12 @@
   return instance.js_object;
 }
 
-Handle<Code> ModuleEnv::GetFunctionCode(uint32_t index) {
+// TODO(mtrofin): remove this once we move to WASM_DIRECT_CALL
+Handle<Code> ModuleEnv::GetCodeOrPlaceholder(uint32_t index) const {
   DCHECK(IsValidFunction(index));
-  if (linker) return linker->GetFunctionCode(index);
-  return instance ? instance->function_code[index] : Handle<Code>::null();
+  if (!placeholders.empty()) return placeholders[index];
+  DCHECK_NOT_NULL(instance);
+  return instance->function_code[index];
 }
 
 Handle<Code> ModuleEnv::GetImportCode(uint32_t index) {
@@ -868,98 +962,122 @@
   DCHECK(IsValidFunction(index));
   // Always make a direct call to whatever is in the table at that location.
   // A wrapper will be generated for FFI calls.
-  WasmFunction* function = &module->functions[index];
+  const WasmFunction* function = &module->functions[index];
   return GetWasmCallDescriptor(zone, function->sig);
 }
 
+Handle<Object> GetWasmFunctionNameOrNull(Isolate* isolate, Handle<Object> wasm,
+                                         uint32_t func_index) {
+  if (!wasm->IsUndefined(isolate)) {
+    Handle<ByteArray> func_names_arr_obj(
+        ByteArray::cast(Handle<JSObject>::cast(wasm)->GetInternalField(
+            kWasmFunctionNamesArray)),
+        isolate);
+    // TODO(clemens): Extract this from the module bytes; skip whole function
+    // name table.
+    Handle<Object> name;
+    if (GetWasmFunctionNameFromTable(func_names_arr_obj, func_index)
+            .ToHandle(&name)) {
+      return name;
+    }
+  }
+  return isolate->factory()->null_value();
+}
+
+Handle<String> GetWasmFunctionName(Isolate* isolate, Handle<Object> wasm,
+                                   uint32_t func_index) {
+  Handle<Object> name_or_null =
+      GetWasmFunctionNameOrNull(isolate, wasm, func_index);
+  if (!name_or_null->IsNull(isolate)) {
+    return Handle<String>::cast(name_or_null);
+  }
+  return isolate->factory()->NewStringFromStaticChars("<WASM UNNAMED>");
+}
+
+bool IsWasmObject(Object* object) {
+  if (!object->IsJSObject()) return false;
+  JSObject* obj = JSObject::cast(object);
+  if (obj->GetInternalFieldCount() != kWasmModuleInternalFieldCount ||
+      !obj->GetInternalField(kWasmModuleCodeTable)->IsFixedArray() ||
+      !obj->GetInternalField(kWasmMemArrayBuffer)->IsJSArrayBuffer() ||
+      !obj->GetInternalField(kWasmFunctionNamesArray)->IsByteArray() ||
+      !obj->GetInternalField(kWasmModuleBytesString)->IsSeqOneByteString()) {
+    return false;
+  }
+  DisallowHeapAllocation no_gc;
+  SeqOneByteString* bytes =
+      SeqOneByteString::cast(obj->GetInternalField(kWasmModuleBytesString));
+  if (bytes->length() < 4) return false;
+  if (memcmp(bytes->GetChars(), "\0asm", 4)) return false;
+
+  // All checks passed.
+  return true;
+}
+
+SeqOneByteString* GetWasmBytes(JSObject* wasm) {
+  return SeqOneByteString::cast(wasm->GetInternalField(kWasmModuleBytesString));
+}
+
+WasmDebugInfo* GetDebugInfo(JSObject* wasm) {
+  Object* info = wasm->GetInternalField(kWasmDebugInfo);
+  if (!info->IsUndefined(wasm->GetIsolate())) return WasmDebugInfo::cast(info);
+  Handle<WasmDebugInfo> new_info = WasmDebugInfo::New(handle(wasm));
+  wasm->SetInternalField(kWasmDebugInfo, *new_info);
+  return *new_info;
+}
+
+namespace testing {
+
 int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
                                 const byte* module_end, bool asm_js) {
   HandleScope scope(isolate);
   Zone zone(isolate->allocator());
+  ErrorThrower thrower(isolate, "CompileAndRunWasmModule");
+
   // Decode the module, but don't verify function bodies, since we'll
   // be compiling them anyway.
-  ModuleResult result = DecodeWasmModule(isolate, &zone, module_start,
-                                         module_end, false, kWasmOrigin);
-  if (result.failed()) {
-    if (result.val) {
-      delete result.val;
-    }
+  ModuleResult decoding_result =
+      DecodeWasmModule(isolate, &zone, module_start, module_end, false,
+                       asm_js ? kAsmJsOrigin : kWasmOrigin);
+
+  std::unique_ptr<const WasmModule> module(decoding_result.val);
+  if (decoding_result.failed()) {
     // Module verification failed. throw.
-    std::ostringstream str;
-    str << "WASM.compileRun() failed: " << result;
-    isolate->Throw(
-        *isolate->factory()->NewStringFromAsciiChecked(str.str().c_str()));
+    thrower.Error("WASM.compileRun() failed: %s",
+                  decoding_result.error_msg.get());
     return -1;
   }
 
-  int32_t retval = CompileAndRunWasmModule(isolate, result.val);
-  delete result.val;
-  return retval;
-}
-
-int32_t CompileAndRunWasmModule(Isolate* isolate, WasmModule* module) {
-  ErrorThrower thrower(isolate, "CompileAndRunWasmModule");
-  WasmModuleInstance instance(module);
-
-  // Allocate and initialize the linear memory.
-  if (!AllocateMemory(&thrower, isolate, &instance)) {
-    return -1;
+  if (module->import_table.size() > 0) {
+    thrower.Error("Not supported: module has imports.");
   }
-  LoadDataSegments(module, instance.mem_start, instance.mem_size);
-
-  // Allocate the globals area if necessary.
-  if (!AllocateGlobals(&thrower, isolate, &instance)) {
-    return -1;
+  if (module->export_table.size() == 0) {
+    thrower.Error("Not supported: module has no exports.");
   }
 
-  // Build the function table.
-  instance.function_table = BuildFunctionTable(isolate, module);
+  if (thrower.error()) return -1;
 
-  // Create module environment.
-  WasmLinker linker(isolate, module->functions.size());
-  ModuleEnv module_env;
-  module_env.module = module;
-  module_env.instance = &instance;
-  module_env.linker = &linker;
-  module_env.origin = module->origin;
+  Handle<JSObject> instance =
+      module
+          ->Instantiate(isolate, Handle<JSReceiver>::null(),
+                        Handle<JSArrayBuffer>::null())
+          .ToHandleChecked();
 
-  // Compile all functions.
-  Handle<Code> main_code = Handle<Code>::null();  // record last code.
-  uint32_t index = 0;
-  int main_index = 0;
-  for (const WasmFunction& func : module->functions) {
-    DCHECK_EQ(index, func.func_index);
-    // Compile the function and install it in the code table.
-    Handle<Code> code =
-        compiler::CompileWasmFunction(&thrower, isolate, &module_env, &func);
-    if (!code.is_null()) {
-      if (func.exported) {
-        main_code = code;
-        main_index = index;
-      }
-      linker.Finish(index, code);
-    }
-    if (thrower.error()) return -1;
-    index++;
-  }
+  Handle<Name> exports = isolate->factory()->InternalizeUtf8String("exports");
+  Handle<JSObject> exports_object = Handle<JSObject>::cast(
+      JSObject::GetProperty(instance, exports).ToHandleChecked());
+  Handle<Name> main_name = isolate->factory()->NewStringFromStaticChars("main");
+  PropertyDescriptor desc;
+  Maybe<bool> property_found = JSReceiver::GetOwnPropertyDescriptor(
+      isolate, exports_object, main_name, &desc);
+  if (!property_found.FromMaybe(false)) return -1;
 
-  if (main_code.is_null()) {
-    thrower.Error("WASM.compileRun() failed: no main code found");
-    return -1;
-  }
-
-  linker.Link(instance.function_table, instance.module->function_table);
-
-  // Wrap the main code so it can be called as a JS function.
-  Handle<String> name = isolate->factory()->NewStringFromStaticChars("main");
-  Handle<JSObject> module_object = Handle<JSObject>(0, isolate);
-  Handle<JSFunction> jsfunc = compiler::CompileJSToWasmWrapper(
-      isolate, &module_env, name, main_code, module_object, main_index);
+  Handle<JSFunction> main_export = Handle<JSFunction>::cast(desc.value());
 
   // Call the JS function.
-  Handle<Object> undefined(isolate->heap()->undefined_value(), isolate);
+  Handle<Object> undefined = isolate->factory()->undefined_value();
   MaybeHandle<Object> retval =
-      Execution::Call(isolate, jsfunc, undefined, 0, nullptr);
+      Execution::Call(isolate, main_export, undefined, 0, nullptr);
 
   // The result should be a number.
   if (retval.is_null()) {
@@ -977,15 +1095,7 @@
   return -1;
 }
 
-Handle<Object> GetWasmFunctionName(Handle<JSObject> wasm, uint32_t func_index) {
-  Handle<Object> func_names_arr_obj = handle(
-      wasm->GetInternalField(kWasmFunctionNamesArray), wasm->GetIsolate());
-  if (func_names_arr_obj->IsUndefined())
-    return func_names_arr_obj;  // Return undefined.
-  return GetWasmFunctionNameFromTable(
-      Handle<ByteArray>::cast(func_names_arr_obj), func_index);
-}
-
+}  // namespace testing
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/wasm/wasm-module.h b/src/wasm/wasm-module.h
index 2ac0425..019dc56 100644
--- a/src/wasm/wasm-module.h
+++ b/src/wasm/wasm-module.h
@@ -5,11 +5,10 @@
 #ifndef V8_WASM_MODULE_H_
 #define V8_WASM_MODULE_H_
 
-#include "src/wasm/wasm-opcodes.h"
-#include "src/wasm/wasm-result.h"
-
 #include "src/api.h"
 #include "src/handles.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-result.h"
 
 namespace v8 {
 namespace internal {
@@ -42,15 +41,13 @@
   F(FunctionBodies, 8, "code")         \
   F(DataSegments, 9, "data")           \
   F(Names, 10, "name")                 \
-  F(OldFunctions, 0, "old_function")   \
+  F(FunctionTablePad, 11, "table_pad") \
   F(Globals, 0, "global")              \
   F(End, 0, "end")
 
 // Contants for the above section types: {LEB128 length, characters...}.
 #define WASM_SECTION_MEMORY 6, 'm', 'e', 'm', 'o', 'r', 'y'
 #define WASM_SECTION_SIGNATURES 4, 't', 'y', 'p', 'e'
-#define WASM_SECTION_OLD_FUNCTIONS \
-  12, 'o', 'l', 'd', '_', 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n'
 #define WASM_SECTION_GLOBALS 6, 'g', 'l', 'o', 'b', 'a', 'l'
 #define WASM_SECTION_DATA_SEGMENTS 4, 'd', 'a', 't', 'a'
 #define WASM_SECTION_FUNCTION_TABLE 5, 't', 'a', 'b', 'l', 'e'
@@ -62,11 +59,12 @@
   8, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n'
 #define WASM_SECTION_FUNCTION_BODIES 4, 'c', 'o', 'd', 'e'
 #define WASM_SECTION_NAMES 4, 'n', 'a', 'm', 'e'
+#define WASM_SECTION_FUNCTION_TABLE_PAD \
+  9, 't', 'a', 'b', 'l', 'e', '_', 'p', 'a', 'd'
 
 // Constants for the above section headers' size (LEB128 + characters).
 #define WASM_SECTION_MEMORY_SIZE ((size_t)7)
 #define WASM_SECTION_SIGNATURES_SIZE ((size_t)5)
-#define WASM_SECTION_OLD_FUNCTIONS_SIZE ((size_t)13)
 #define WASM_SECTION_GLOBALS_SIZE ((size_t)7)
 #define WASM_SECTION_DATA_SEGMENTS_SIZE ((size_t)5)
 #define WASM_SECTION_FUNCTION_TABLE_SIZE ((size_t)6)
@@ -77,6 +75,9 @@
 #define WASM_SECTION_FUNCTION_SIGNATURES_SIZE ((size_t)9)
 #define WASM_SECTION_FUNCTION_BODIES_SIZE ((size_t)5)
 #define WASM_SECTION_NAMES_SIZE ((size_t)5)
+#define WASM_SECTION_FUNCTION_TABLE_PAD_SIZE ((size_t)10)
+
+class WasmDebugInfo;
 
 struct WasmSection {
   enum class Code : uint32_t {
@@ -114,7 +115,6 @@
   uint32_t name_length;  // length in bytes of the name.
   uint32_t code_start_offset;    // offset in the module bytes of code start.
   uint32_t code_end_offset;      // offset in the module bytes of code end.
-  bool exported;                 // true if this function is exported.
 };
 
 // Static representation of an imported WASM function.
@@ -159,7 +159,6 @@
   static const uint32_t kMinMemPages = 1;       // Minimum memory size = 64kb
   static const uint32_t kMaxMemPages = 16384;   // Maximum memory size =  1gb
 
-  Isolate* shared_isolate;    // isolate for storing shared code.
   const byte* module_start;   // starting address for the module bytes.
   const byte* module_end;     // end address for the module bytes.
   uint32_t min_mem_pages;     // minimum size of the memory in 64k pages.
@@ -170,12 +169,23 @@
   ModuleOrigin origin;        // origin of the module
 
   std::vector<WasmGlobal> globals;             // globals in this module.
+  uint32_t globals_size;                       // size of globals table.
+  uint32_t indirect_table_size;                // size of indirect function
+                                               //     table (includes padding).
   std::vector<FunctionSig*> signatures;        // signatures in this module.
   std::vector<WasmFunction> functions;         // functions in this module.
   std::vector<WasmDataSegment> data_segments;  // data segments in this module.
   std::vector<uint16_t> function_table;        // function table.
   std::vector<WasmImport> import_table;        // import table.
   std::vector<WasmExport> export_table;        // export table.
+  // We store the semaphore here to extend its lifetime. In <libc-2.21, which we
+  // use on the try bots, semaphore::Wait() can return while some compilation
+  // tasks are still executing semaphore::Signal(). If the semaphore is cleaned
+  // up right after semaphore::Wait() returns, then this can cause an
+  // invalid-semaphore error in the compilation tasks.
+  // TODO(wasm): Move this semaphore back to CompileInParallel when the try bots
+  // switch to libc-2.21 or higher.
+  base::SmartPointer<base::Semaphore> pending_tasks;
 
   WasmModule();
 
@@ -195,7 +205,7 @@
 
   // Get a string stored in the module bytes representing a name.
   WasmName GetNameOrNull(uint32_t offset, uint32_t length) const {
-    if (length == 0) return {NULL, 0};  // no name.
+    if (offset == 0 && length == 0) return {NULL, 0};  // no name.
     CHECK(BoundsCheck(offset, offset + length));
     DCHECK_GE(static_cast<int>(length), 0);
     return {reinterpret_cast<const char*>(module_start + offset),
@@ -203,7 +213,7 @@
   }
 
   // Get a string stored in the module bytes representing a function name.
-  WasmName GetNameOrNull(WasmFunction* function) const {
+  WasmName GetNameOrNull(const WasmFunction* function) const {
     return GetNameOrNull(function->name_offset, function->name_length);
   }
 
@@ -215,12 +225,22 @@
 
   // Creates a new instantiation of the module in the given isolate.
   MaybeHandle<JSObject> Instantiate(Isolate* isolate, Handle<JSReceiver> ffi,
-                                    Handle<JSArrayBuffer> memory);
+                                    Handle<JSArrayBuffer> memory) const;
+
+  Handle<FixedArray> CompileFunctions(Isolate* isolate) const;
+
+  uint32_t FunctionTableSize() const {
+    if (indirect_table_size > 0) {
+      return indirect_table_size;
+    }
+    DCHECK_LE(function_table.size(), UINT32_MAX);
+    return static_cast<uint32_t>(function_table.size());
+  }
 };
 
 // An instantiated WASM module, including memory, function table, etc.
 struct WasmModuleInstance {
-  WasmModule* module;  // static representation of the module.
+  const WasmModule* module;  // static representation of the module.
   // -- Heap allocated --------------------------------------------------------
   Handle<JSObject> js_object;            // JavaScript module object.
   Handle<Context> context;               // JavaScript native context.
@@ -231,34 +251,33 @@
   std::vector<Handle<Code>> import_code;    // code objects for each import.
   // -- raw memory ------------------------------------------------------------
   byte* mem_start;  // start of linear memory.
-  size_t mem_size;  // size of the linear memory.
+  uint32_t mem_size;  // size of the linear memory.
   // -- raw globals -----------------------------------------------------------
   byte* globals_start;  // start of the globals area.
-  size_t globals_size;  // size of the globals area.
 
-  explicit WasmModuleInstance(WasmModule* m)
+  explicit WasmModuleInstance(const WasmModule* m)
       : module(m),
+        function_code(m->functions.size()),
+        import_code(m->import_table.size()),
         mem_start(nullptr),
         mem_size(0),
-        globals_start(nullptr),
-        globals_size(0) {}
+        globals_start(nullptr) {}
 };
 
-// forward declaration.
-class WasmLinker;
-
 // Interface provided to the decoder/graph builder which contains only
 // minimal information about the globals, functions, and function tables.
 struct ModuleEnv {
-  WasmModule* module;
+  const WasmModule* module;
   WasmModuleInstance* instance;
-  WasmLinker* linker;
   ModuleOrigin origin;
+  // TODO(mtrofin): remove this once we introduce WASM_DIRECT_CALL
+  // reloc infos.
+  std::vector<Handle<Code>> placeholders;
 
   bool IsValidGlobal(uint32_t index) {
     return module && index < module->globals.size();
   }
-  bool IsValidFunction(uint32_t index) {
+  bool IsValidFunction(uint32_t index) const {
     return module && index < module->functions.size();
   }
   bool IsValidSignature(uint32_t index) {
@@ -283,15 +302,14 @@
     DCHECK(IsValidSignature(index));
     return module->signatures[index];
   }
-  size_t FunctionTableSize() {
-    return module ? module->function_table.size() : 0;
+  uint32_t FunctionTableSize() const {
+    return module->FunctionTableSize();
   }
 
   bool asm_js() { return origin == kAsmJsOrigin; }
 
-  Handle<Code> GetFunctionCode(uint32_t index);
+  Handle<Code> GetCodeOrPlaceholder(uint32_t index) const;
   Handle<Code> GetImportCode(uint32_t index);
-  Handle<FixedArray> GetFunctionTable();
 
   static compiler::CallDescriptor* GetWasmCallDescriptor(Zone* zone,
                                                          FunctionSig* sig);
@@ -312,22 +330,45 @@
 std::ostream& operator<<(std::ostream& os, const WasmFunction& function);
 std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name);
 
-typedef Result<WasmModule*> ModuleResult;
+typedef Result<const WasmModule*> ModuleResult;
 typedef Result<WasmFunction*> FunctionResult;
+typedef std::vector<std::pair<int, int>> FunctionOffsets;
+typedef Result<FunctionOffsets> FunctionOffsetsResult;
 
-// For testing. Decode, verify, and run the last exported function in the
-// given encoded module.
+// Extract a function name from the given wasm object.
+// Returns "<WASM UNNAMED>" if the function is unnamed or the name is not a
+// valid UTF-8 string.
+Handle<String> GetWasmFunctionName(Isolate* isolate, Handle<Object> wasm,
+                                   uint32_t func_index);
+
+// Extract a function name from the given wasm object.
+// Returns a null handle if the function is unnamed or the name is not a valid
+// UTF-8 string.
+Handle<Object> GetWasmFunctionNameOrNull(Isolate* isolate, Handle<Object> wasm,
+                                         uint32_t func_index);
+
+// Return the binary source bytes of a wasm module.
+SeqOneByteString* GetWasmBytes(JSObject* wasm);
+
+// Get the debug info associated with the given wasm object.
+// If no debug info exists yet, it is created automatically.
+WasmDebugInfo* GetDebugInfo(JSObject* wasm);
+
+// Check whether the given object is a wasm object.
+// This checks the number and type of internal fields, so it's not 100 percent
+// secure. If it turns out that we need more complete checks, we could add a
+// special marker as internal field, which will definitely never occur anywhere
+// else.
+bool IsWasmObject(Object* object);
+
+namespace testing {
+
+// Decode, verify, and run the function labeled "main" in the
+// given encoded module. The module should have no imports.
 int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
                                 const byte* module_end, bool asm_js = false);
 
-// For testing. Decode, verify, and run the last exported function in the
-// given decoded module.
-int32_t CompileAndRunWasmModule(Isolate* isolate, WasmModule* module);
-
-// Extract a function name from the given wasm object.
-// Returns undefined if the function is unnamed or the function index is
-// invalid.
-Handle<Object> GetWasmFunctionName(Handle<JSObject> wasm, uint32_t func_index);
+}  // namespace testing
 
 }  // namespace wasm
 }  // namespace internal
diff --git a/src/wasm/wasm-opcodes.cc b/src/wasm/wasm-opcodes.cc
index a08fa8d..da6c161 100644
--- a/src/wasm/wasm-opcodes.cc
+++ b/src/wasm/wasm-opcodes.cc
@@ -40,12 +40,12 @@
 
 std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
   if (sig.return_count() == 0) os << "v";
-  for (size_t i = 0; i < sig.return_count(); i++) {
+  for (size_t i = 0; i < sig.return_count(); ++i) {
     os << WasmOpcodes::ShortNameOf(sig.GetReturn(i));
   }
   os << "_";
   if (sig.parameter_count() == 0) os << "v";
-  for (size_t i = 0; i < sig.parameter_count(); i++) {
+  for (size_t i = 0; i < sig.parameter_count(); ++i) {
     os << WasmOpcodes::ShortNameOf(sig.GetParam(i));
   }
   return os;
diff --git a/src/wasm/wasm-opcodes.h b/src/wasm/wasm-opcodes.h
index 764c503..b29e4a0 100644
--- a/src/wasm/wasm-opcodes.h
+++ b/src/wasm/wasm-opcodes.h
@@ -18,7 +18,8 @@
   kLocalI32 = 1,
   kLocalI64 = 2,
   kLocalF32 = 3,
-  kLocalF64 = 4
+  kLocalF64 = 4,
+  kLocalS128 = 5
 };
 
 // Binary encoding of memory types.
@@ -32,7 +33,8 @@
   kMemI64 = 6,
   kMemU64 = 7,
   kMemF32 = 8,
-  kMemF64 = 9
+  kMemF64 = 9,
+  kMemS128 = 10
 };
 
 // We reuse the internal machine type to represent WebAssembly AST types.
@@ -43,6 +45,7 @@
 const LocalType kAstI64 = MachineRepresentation::kWord64;
 const LocalType kAstF32 = MachineRepresentation::kFloat32;
 const LocalType kAstF64 = MachineRepresentation::kFloat64;
+const LocalType kAstS128 = MachineRepresentation::kSimd128;
 // We use kTagged here because kNone is already used by kAstStmt.
 const LocalType kAstEnd = MachineRepresentation::kTagged;
 
@@ -115,8 +118,8 @@
 
 // Load memory expressions.
 #define FOREACH_MISC_MEM_OPCODE(V) \
-  V(MemorySize, 0x3b, i_v)         \
-  V(GrowMemory, 0x39, i_i)
+  V(GrowMemory, 0x39, i_i)         \
+  V(MemorySize, 0x3b, i_v)
 
 // Expressions with signatures.
 #define FOREACH_SIMPLE_OPCODE(V)  \
@@ -278,18 +281,144 @@
   V(I32AsmjsSConvertF64, 0xe2, i_d)    \
   V(I32AsmjsUConvertF64, 0xe3, i_d)
 
+#define FOREACH_SIMD_OPCODE(V)         \
+  V(F32x4Splat, 0xe500, s_f)           \
+  V(F32x4ExtractLane, 0xe501, f_si)    \
+  V(F32x4ReplaceLane, 0xe502, s_sif)   \
+  V(F32x4Abs, 0xe503, s_s)             \
+  V(F32x4Neg, 0xe504, s_s)             \
+  V(F32x4Sqrt, 0xe505, s_s)            \
+  V(F32x4RecipApprox, 0xe506, s_s)     \
+  V(F32x4SqrtApprox, 0xe507, s_s)      \
+  V(F32x4Add, 0xe508, s_ss)            \
+  V(F32x4Sub, 0xe509, s_ss)            \
+  V(F32x4Mul, 0xe50a, s_ss)            \
+  V(F32x4Div, 0xe50b, s_ss)            \
+  V(F32x4Min, 0xe50c, s_ss)            \
+  V(F32x4Max, 0xe50d, s_ss)            \
+  V(F32x4MinNum, 0xe50e, s_ss)         \
+  V(F32x4MaxNum, 0xe50f, s_ss)         \
+  V(F32x4Eq, 0xe510, s_ss)             \
+  V(F32x4Ne, 0xe511, s_ss)             \
+  V(F32x4Lt, 0xe512, s_ss)             \
+  V(F32x4Le, 0xe513, s_ss)             \
+  V(F32x4Gt, 0xe514, s_ss)             \
+  V(F32x4Ge, 0xe515, s_ss)             \
+  V(F32x4Select, 0xe516, s_sss)        \
+  V(F32x4Swizzle, 0xe517, s_s)         \
+  V(F32x4Shuffle, 0xe518, s_ss)        \
+  V(F32x4FromInt32x4, 0xe519, s_s)     \
+  V(F32x4FromUint32x4, 0xe51a, s_s)    \
+  V(I32x4Splat, 0xe51b, s_i)           \
+  V(I32x4ExtractLane, 0xe51c, i_si)    \
+  V(I32x4ReplaceLane, 0xe51d, s_sii)   \
+  V(I32x4Neg, 0xe51e, s_s)             \
+  V(I32x4Add, 0xe51f, s_ss)            \
+  V(I32x4Sub, 0xe520, s_ss)            \
+  V(I32x4Mul, 0xe521, s_ss)            \
+  V(I32x4Min_s, 0xe522, s_ss)          \
+  V(I32x4Max_s, 0xe523, s_ss)          \
+  V(I32x4Shl, 0xe524, s_si)            \
+  V(I32x4Shr_s, 0xe525, s_si)          \
+  V(I32x4Eq, 0xe526, s_ss)             \
+  V(I32x4Ne, 0xe527, s_ss)             \
+  V(I32x4Lt_s, 0xe528, s_ss)           \
+  V(I32x4Le_s, 0xe529, s_ss)           \
+  V(I32x4Gt_s, 0xe52a, s_ss)           \
+  V(I32x4Ge_s, 0xe52b, s_ss)           \
+  V(I32x4Select, 0xe52c, s_sss)        \
+  V(I32x4Swizzle, 0xe52d, s_s)         \
+  V(I32x4Shuffle, 0xe52e, s_ss)        \
+  V(I32x4FromFloat32x4, 0xe52f, s_s)   \
+  V(I32x4Min_u, 0xe530, s_ss)          \
+  V(I32x4Max_u, 0xe531, s_ss)          \
+  V(I32x4Shr_u, 0xe532, s_ss)          \
+  V(I32x4Lt_u, 0xe533, s_ss)           \
+  V(I32x4Le_u, 0xe534, s_ss)           \
+  V(I32x4Gt_u, 0xe535, s_ss)           \
+  V(I32x4Ge_u, 0xe536, s_ss)           \
+  V(Ui32x4FromFloat32x4, 0xe537, s_s)  \
+  V(I16x8Splat, 0xe538, s_i)           \
+  V(I16x8ExtractLane, 0xe539, i_si)    \
+  V(I16x8ReplaceLane, 0xe53a, s_sii)   \
+  V(I16x8Neg, 0xe53b, s_s)             \
+  V(I16x8Add, 0xe53c, s_ss)            \
+  V(I16x8AddSaturate_s, 0xe53d, s_ss)  \
+  V(I16x8Sub, 0xe53e, s_ss)            \
+  V(I16x8SubSaturate_s, 0xe53f, s_ss)  \
+  V(I16x8Mul, 0xe540, s_ss)            \
+  V(I16x8Min_s, 0xe541, s_ss)          \
+  V(I16x8Max_s, 0xe542, s_ss)          \
+  V(I16x8Shl, 0xe543, s_si)            \
+  V(I16x8Shr_s, 0xe544, s_si)          \
+  V(I16x8Eq, 0xe545, s_ss)             \
+  V(I16x8Ne, 0xe546, s_ss)             \
+  V(I16x8Lt_s, 0xe547, s_ss)           \
+  V(I16x8Le_s, 0xe548, s_ss)           \
+  V(I16x8Gt_s, 0xe549, s_ss)           \
+  V(I16x8Ge_s, 0xe54a, s_ss)           \
+  V(I16x8Select, 0xe54b, s_sss)        \
+  V(I16x8Swizzle, 0xe54c, s_s)         \
+  V(I16x8Shuffle, 0xe54d, s_ss)        \
+  V(I16x8AddSaturate_u, 0xe54e, s_ss)  \
+  V(I16x8SubSaturate_u, 0xe54f, s_ss)  \
+  V(I16x8Min_u, 0xe550, s_ss)          \
+  V(I16x8Max_u, 0xe551, s_ss)          \
+  V(I16x8Shr_u, 0xe552, s_si)          \
+  V(I16x8Lt_u, 0xe553, s_ss)           \
+  V(I16x8Le_u, 0xe554, s_ss)           \
+  V(I16x8Gt_u, 0xe555, s_ss)           \
+  V(I16x8Ge_u, 0xe556, s_ss)           \
+  V(I8x16Splat, 0xe557, s_i)           \
+  V(I8x16ExtractLane, 0xe558, i_si)    \
+  V(I8x16ReplaceLane, 0xe559, s_sii)   \
+  V(I8x16Neg, 0xe55a, s_s)             \
+  V(I8x16Add, 0xe55b, s_ss)            \
+  V(I8x16AddSaturate_s, 0xe55c, s_ss)  \
+  V(I8x16Sub, 0xe55d, s_ss)            \
+  V(I8x16SubSaturate_s, 0xe55e, s_ss)  \
+  V(I8x16Mul, 0xe55f, s_ss)            \
+  V(I8x16Min_s, 0xe560, s_ss)          \
+  V(I8x16Max_s, 0xe561, s_ss)          \
+  V(I8x16Shl, 0xe562, s_si)            \
+  V(I8x16Shr_s, 0xe563, s_si)          \
+  V(I8x16Eq, 0xe564, s_ss)             \
+  V(I8x16Neq, 0xe565, s_ss)            \
+  V(I8x16Lt_s, 0xe566, s_ss)           \
+  V(I8x16Le_s, 0xe567, s_ss)           \
+  V(I8x16Gt_s, 0xe568, s_ss)           \
+  V(I8x16Ge_s, 0xe569, s_ss)           \
+  V(I8x16Select, 0xe56a, s_sss)        \
+  V(I8x16Swizzle, 0xe56b, s_s)         \
+  V(I8x16Shuffle, 0xe56c, s_ss)        \
+  V(I8x16AddSaturate_u, 0xe56d, s_ss)  \
+  V(I8x16Sub_saturate_u, 0xe56e, s_ss) \
+  V(I8x16Min_u, 0xe56f, s_ss)          \
+  V(I8x16Max_u, 0xe570, s_ss)          \
+  V(I8x16Shr_u, 0xe571, s_ss)          \
+  V(I8x16Lt_u, 0xe572, s_ss)           \
+  V(I8x16Le_u, 0xe573, s_ss)           \
+  V(I8x16Gt_u, 0xe574, s_ss)           \
+  V(I8x16Ge_u, 0xe575, s_ss)           \
+  V(S128And, 0xe576, s_ss)             \
+  V(S128Ior, 0xe577, s_ss)             \
+  V(S128Xor, 0xe578, s_ss)             \
+  V(S128Not, 0xe579, s_s)
+
 // All opcodes.
-#define FOREACH_OPCODE(V)     \
-  FOREACH_CONTROL_OPCODE(V)   \
-  FOREACH_MISC_OPCODE(V)      \
-  FOREACH_SIMPLE_OPCODE(V)    \
-  FOREACH_STORE_MEM_OPCODE(V) \
-  FOREACH_LOAD_MEM_OPCODE(V)  \
-  FOREACH_MISC_MEM_OPCODE(V)  \
-  FOREACH_ASMJS_COMPAT_OPCODE(V)
+#define FOREACH_OPCODE(V)        \
+  FOREACH_CONTROL_OPCODE(V)      \
+  FOREACH_MISC_OPCODE(V)         \
+  FOREACH_SIMPLE_OPCODE(V)       \
+  FOREACH_STORE_MEM_OPCODE(V)    \
+  FOREACH_LOAD_MEM_OPCODE(V)     \
+  FOREACH_MISC_MEM_OPCODE(V)     \
+  FOREACH_ASMJS_COMPAT_OPCODE(V) \
+  FOREACH_SIMD_OPCODE(V)
 
 // All signatures.
 #define FOREACH_SIGNATURE(V)         \
+  FOREACH_SIMD_SIGNATURE(V)          \
   V(i_ii, kAstI32, kAstI32, kAstI32) \
   V(i_i, kAstI32, kAstI32)           \
   V(i_v, kAstI32)                    \
@@ -318,6 +447,18 @@
   V(f_if, kAstF32, kAstI32, kAstF32) \
   V(l_il, kAstI64, kAstI32, kAstI64)
 
+#define FOREACH_SIMD_SIGNATURE(V)                  \
+  V(s_s, kAstS128, kAstS128)                       \
+  V(s_f, kAstS128, kAstF32)                        \
+  V(f_si, kAstF32, kAstS128, kAstI32)              \
+  V(s_sif, kAstS128, kAstS128, kAstI32, kAstF32)   \
+  V(s_ss, kAstS128, kAstS128, kAstS128)            \
+  V(s_sss, kAstS128, kAstS128, kAstS128, kAstS128) \
+  V(s_i, kAstS128, kAstI32)                        \
+  V(i_si, kAstI32, kAstS128, kAstI32)              \
+  V(s_sii, kAstS128, kAstS128, kAstI32, kAstI32)   \
+  V(s_si, kAstS128, kAstS128, kAstI32)
+
 enum WasmOpcode {
 // Declare expression opcodes.
 #define DECLARE_NAMED_ENUM(name, opcode, sig) kExpr##name = opcode,
@@ -369,6 +510,8 @@
         return kLocalF64;
       case kAstStmt:
         return kLocalVoid;
+      case kAstS128:
+        return kLocalS128;
       default:
         UNREACHABLE();
         return kLocalVoid;
@@ -396,6 +539,8 @@
       return kMemF32;
     } else if (type == MachineType::Float64()) {
       return kMemF64;
+    } else if (type == MachineType::Simd128()) {
+      return kMemS128;
     } else {
       UNREACHABLE();
       return kMemI32;
@@ -412,6 +557,8 @@
         return MachineType::Float32();
       case kAstF64:
         return MachineType::Float64();
+      case kAstS128:
+        return MachineType::Simd128();
       case kAstStmt:
         return MachineType::None();
       default:
@@ -441,6 +588,8 @@
       return kAstF32;
     } else if (type == MachineType::Float64()) {
       return kAstF64;
+    } else if (type == MachineType::Simd128()) {
+      return kAstS128;
     } else {
       UNREACHABLE();
       return kAstI32;
@@ -484,6 +633,8 @@
         return 'f';
       case kAstF64:
         return 'd';
+      case kAstS128:
+        return 's';
       case kAstStmt:
         return 'v';
       case kAstEnd:
@@ -504,6 +655,8 @@
         return "f32";
       case kAstF64:
         return "f64";
+      case kAstS128:
+        return "s128";
       case kAstStmt:
         return "<stmt>";
       case kAstEnd:
diff --git a/src/wasm/wasm-result.cc b/src/wasm/wasm-result.cc
index 3de5812..30268ac 100644
--- a/src/wasm/wasm-result.cc
+++ b/src/wasm/wasm-result.cc
@@ -6,8 +6,7 @@
 
 #include "src/factory.h"
 #include "src/heap/heap.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"  // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/isolate-inl.h"
 #include "src/objects.h"
 
 #include "src/base/platform/platform.h"
@@ -29,10 +28,10 @@
 }
 
 void ErrorThrower::Error(const char* format, ...) {
-  if (error_) return;  // only report the first error.
-  error_ = true;
-  char buffer[256];
+  // Only report the first error.
+  if (error()) return;
 
+  char buffer[256];
   va_list arguments;
   va_start(arguments, format);
   base::OS::VSNPrintF(buffer, 255, format, arguments);
@@ -44,8 +43,13 @@
   }
   str << buffer;
 
-  isolate_->ScheduleThrow(
-      *isolate_->factory()->NewStringFromAsciiChecked(str.str().c_str()));
+  message_ = isolate_->factory()->NewStringFromAsciiChecked(str.str().c_str());
+}
+
+ErrorThrower::~ErrorThrower() {
+  if (error() && !isolate_->has_pending_exception()) {
+    isolate_->ScheduleThrow(*message_);
+  }
 }
 }  // namespace wasm
 }  // namespace internal
diff --git a/src/wasm/wasm-result.h b/src/wasm/wasm-result.h
index b650c33..e741de8 100644
--- a/src/wasm/wasm-result.h
+++ b/src/wasm/wasm-result.h
@@ -8,6 +8,7 @@
 #include "src/base/compiler-specific.h"
 #include "src/base/smart-pointers.h"
 
+#include "src/handles.h"
 #include "src/globals.h"
 
 namespace v8 {
@@ -38,8 +39,7 @@
 // The overall result of decoding a function or a module.
 template <typename T>
 struct Result {
-  Result()
-      : val(nullptr), error_code(kSuccess), start(nullptr), error_pc(nullptr) {
+  Result() : val(), error_code(kSuccess), start(nullptr), error_pc(nullptr) {
     error_msg.Reset(nullptr);
   }
 
@@ -92,7 +92,8 @@
 class ErrorThrower {
  public:
   ErrorThrower(Isolate* isolate, const char* context)
-      : isolate_(isolate), context_(context), error_(false) {}
+      : isolate_(isolate), context_(context) {}
+  ~ErrorThrower();
 
   PRINTF_FORMAT(2, 3) void Error(const char* fmt, ...);
 
@@ -103,12 +104,18 @@
     return Error("%s", str.str().c_str());
   }
 
-  bool error() const { return error_; }
+  i::Handle<i::String> Reify() {
+    auto result = message_;
+    message_ = i::Handle<i::String>();
+    return result;
+  }
+
+  bool error() const { return !message_.is_null(); }
 
  private:
   Isolate* isolate_;
   const char* context_;
-  bool error_;
+  i::Handle<i::String> message_;
 };
 }  // namespace wasm
 }  // namespace internal
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 0af8f93..60acacd 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -179,12 +179,10 @@
   if (rm_reg.high_bit()) emit(0x41);
 }
 
-
 void Assembler::emit_optional_rex_32(XMMRegister rm_reg) {
   if (rm_reg.high_bit()) emit(0x41);
 }
 
-
 void Assembler::emit_optional_rex_32(const Operand& op) {
   if (op.rex_ != 0) emit(0x40 | op.rex_);
 }
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 5f8fb68..3345e30 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -122,36 +122,24 @@
   return Memory::Address_at(pc_);
 }
 
+Address RelocInfo::wasm_global_reference() {
+  DCHECK(IsWasmGlobalReference(rmode_));
+  return Memory::Address_at(pc_);
+}
+
 uint32_t RelocInfo::wasm_memory_size_reference() {
   DCHECK(IsWasmMemorySizeReference(rmode_));
   return Memory::uint32_at(pc_);
 }
 
-void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
-    ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
-  if (IsWasmMemoryReference(rmode_)) {
-    Address updated_reference;
-    DCHECK(old_base <= wasm_memory_reference() &&
-           wasm_memory_reference() < old_base + old_size);
-    updated_reference = new_base + (wasm_memory_reference() - old_base);
-    DCHECK(new_base <= updated_reference &&
-           updated_reference < new_base + new_size);
-    Memory::Address_at(pc_) = updated_reference;
-  } else if (IsWasmMemorySizeReference(rmode_)) {
-    uint32_t updated_size_reference;
-    DCHECK(wasm_memory_size_reference() <= old_size);
-    updated_size_reference =
-        new_size + (wasm_memory_size_reference() - old_size);
-    DCHECK(updated_size_reference <= new_size);
-    Memory::uint32_at(pc_) = updated_size_reference;
-  } else {
-    UNREACHABLE();
-  }
-  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
-  }
+void RelocInfo::unchecked_update_wasm_memory_reference(
+    Address address, ICacheFlushMode flush_mode) {
+  Memory::Address_at(pc_) = address;
+}
+
+void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
+                                                  ICacheFlushMode flush_mode) {
+  Memory::uint32_at(pc_) = size;
 }
 
 // -----------------------------------------------------------------------------
@@ -332,6 +320,8 @@
       static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
   desc->origin = this;
   desc->constant_pool_size = 0;
+  desc->unwinding_info_size = 0;
+  desc->unwinding_info = nullptr;
 }
 
 
@@ -599,12 +589,9 @@
                                         int size) {
   EnsureSpace ensure_space(this);
   emit_rex(dst, size);
-  if (is_int8(src.value_)) {
+  if (is_int8(src.value_) && RelocInfo::IsNone(src.rmode_)) {
     emit(0x83);
     emit_modrm(subcode, dst);
-    if (!RelocInfo::IsNone(src.rmode_)) {
-      RecordRelocInfo(src.rmode_);
-    }
     emit(src.value_);
   } else if (dst.is(rax)) {
     emit(0x05 | (subcode << 3));
@@ -848,7 +835,6 @@
 
 
 void Assembler::call(Label* L) {
-  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   // 1110 1000 #32-bit disp.
   emit(0xE8);
@@ -870,7 +856,6 @@
 
 void Assembler::call(Address entry, RelocInfo::Mode rmode) {
   DCHECK(RelocInfo::IsRuntimeEntry(rmode));
-  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   // 1110 1000 #32-bit disp.
   emit(0xE8);
@@ -881,7 +866,6 @@
 void Assembler::call(Handle<Code> target,
                      RelocInfo::Mode rmode,
                      TypeFeedbackId ast_id) {
-  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   // 1110 1000 #32-bit disp.
   emit(0xE8);
@@ -890,7 +874,6 @@
 
 
 void Assembler::call(Register adr) {
-  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   // Opcode: FF /2 r64.
   emit_optional_rex_32(adr);
@@ -900,7 +883,6 @@
 
 
 void Assembler::call(const Operand& op) {
-  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   // Opcode: FF /2 m64.
   emit_optional_rex_32(op);
@@ -914,7 +896,6 @@
 // same Code object. Should not be used when generating new code (use labels),
 // but only when patching existing code.
 void Assembler::call(Address target) {
-  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   // 1110 1000 #32-bit disp.
   emit(0xE8);
@@ -1016,6 +997,40 @@
   emit(imm8.value_);
 }
 
+void Assembler::lock() {
+  EnsureSpace ensure_space(this);
+  emit(0xf0);
+}
+
+void Assembler::cmpxchgb(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  if (!src.is_byte_register()) {
+    // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
+    emit_rex_32(src, dst);
+  } else {
+    emit_optional_rex_32(src, dst);
+  }
+  emit(0x0f);
+  emit(0xb0);
+  emit_operand(src, dst);
+}
+
+void Assembler::cmpxchgw(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(src, dst);
+  emit(0x0f);
+  emit(0xb1);
+  emit_operand(src, dst);
+}
+
+void Assembler::emit_cmpxchg(const Operand& dst, Register src, int size) {
+  EnsureSpace ensure_space(this);
+  emit_rex(src, dst, size);
+  emit(0x0f);
+  emit(0xb1);
+  emit_operand(src, dst);
+}
 
 void Assembler::cpuid() {
   EnsureSpace ensure_space(this);
@@ -2856,6 +2871,18 @@
   emit(imm8);
 }
 
+void Assembler::insertps(XMMRegister dst, XMMRegister src, byte imm8) {
+  DCHECK(CpuFeatures::IsSupported(SSE4_1));
+  DCHECK(is_uint8(imm8));
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x3A);
+  emit(0x21);
+  emit_sse_operand(dst, src);
+  emit(imm8);
+}
 
 void Assembler::movsd(const Operand& dst, XMMRegister src) {
   DCHECK(!IsEnabled(AVX));
@@ -3177,6 +3204,38 @@
   emit(imm8);
 }
 
+void Assembler::cmpps(XMMRegister dst, XMMRegister src, int8_t cmp) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xC2);
+  emit_sse_operand(dst, src);
+  emit(cmp);
+}
+
+void Assembler::cmpeqps(XMMRegister dst, XMMRegister src) {
+  cmpps(dst, src, 0x0);
+}
+
+void Assembler::cmpltps(XMMRegister dst, XMMRegister src) {
+  cmpps(dst, src, 0x1);
+}
+
+void Assembler::cmpleps(XMMRegister dst, XMMRegister src) {
+  cmpps(dst, src, 0x2);
+}
+
+void Assembler::cmpneqps(XMMRegister dst, XMMRegister src) {
+  cmpps(dst, src, 0x4);
+}
+
+void Assembler::cmpnltps(XMMRegister dst, XMMRegister src) {
+  cmpps(dst, src, 0x5);
+}
+
+void Assembler::cmpnleps(XMMRegister dst, XMMRegister src) {
+  cmpps(dst, src, 0x6);
+}
 
 void Assembler::cvttss2si(Register dst, const Operand& src) {
   DCHECK(!IsEnabled(AVX));
@@ -3694,6 +3753,14 @@
   emit_sse_operand(dst, src);
 }
 
+void Assembler::punpckldq(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x62);
+  emit_sse_operand(dst, src);
+}
 
 void Assembler::punpckhdq(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
@@ -4158,6 +4225,246 @@
   emit(imm8);
 }
 
+void Assembler::minps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5D);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::minps(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5D);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::maxps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5F);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::maxps(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5F);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::rcpps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x53);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::rcpps(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x53);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::rsqrtps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x52);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::rsqrtps(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x52);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::sqrtps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x51);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::sqrtps(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x51);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5B);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::cvtdq2ps(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5B);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::movups(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  if (src.low_bits() == 4) {
+    // Try to avoid an unnecessary SIB byte.
+    emit_optional_rex_32(src, dst);
+    emit(0x0F);
+    emit(0x11);
+    emit_sse_operand(src, dst);
+  } else {
+    emit_optional_rex_32(dst, src);
+    emit(0x0F);
+    emit(0x10);
+    emit_sse_operand(dst, src);
+  }
+}
+
+void Assembler::movups(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x10);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::movups(const Operand& dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(src, dst);
+  emit(0x0F);
+  emit(0x11);
+  emit_sse_operand(src, dst);
+}
+
+void Assembler::paddd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xFE);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::paddd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xFE);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::psubd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xFA);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::psubd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xFA);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
+  DCHECK(IsEnabled(SSE4_1));
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x38);
+  emit(0x40);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::pmulld(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x38);
+  emit(0x40);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::pmuludq(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xF4);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::pmuludq(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xF4);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::psrldq(XMMRegister dst, uint8_t shift) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst);
+  emit(0x0F);
+  emit(0x73);
+  emit_sse_operand(dst);
+  emit(shift);
+}
+
+void Assembler::cvtps2dq(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5B);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::cvtps2dq(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5B);
+  emit_sse_operand(dst, src);
+}
+
+void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x70);
+  emit_sse_operand(dst, src);
+  emit(shuffle);
+}
 
 void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
   Register ireg = { reg.code() };
@@ -4185,6 +4492,10 @@
   emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
 }
 
+void Assembler::emit_sse_operand(XMMRegister dst) {
+  emit(0xD8 | dst.low_bits());
+}
+
 
 void Assembler::db(uint8_t data) {
   EnsureSpace ensure_space(this);
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 77a1a57..a7759c3 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -117,8 +117,6 @@
     Register r = {code};
     return r;
   }
-  const char* ToString();
-  bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
   bool is(Register reg) const { return reg_code == reg.reg_code; }
   int code() const {
@@ -186,6 +184,7 @@
 #define FLOAT_REGISTERS DOUBLE_REGISTERS
 
 #define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+  V(xmm0)                               \
   V(xmm1)                               \
   V(xmm2)                               \
   V(xmm3)                               \
@@ -199,8 +198,9 @@
   V(xmm11)                              \
   V(xmm12)                              \
   V(xmm13)                              \
-  V(xmm14)                              \
-  V(xmm15)
+  V(xmm14)
+
+static const bool kSimpleFPAliasing = true;
 
 struct XMMRegister {
   enum Code {
@@ -218,8 +218,6 @@
     return result;
   }
 
-  const char* ToString();
-  bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
   bool is(XMMRegister reg) const { return reg_code == reg.reg_code; }
   int code() const {
@@ -421,11 +419,11 @@
   friend class Assembler;
 };
 
-
 #define ASSEMBLER_INSTRUCTION_LIST(V) \
   V(add)                              \
   V(and)                              \
   V(cmp)                              \
+  V(cmpxchg)                          \
   V(dec)                              \
   V(idiv)                             \
   V(div)                              \
@@ -445,7 +443,6 @@
   V(xchg)                             \
   V(xor)
 
-
 // Shift instructions on operands/registers with kPointerSize, kInt32Size and
 // kInt64Size.
 #define SHIFT_INSTRUCTION_LIST(V)       \
@@ -788,9 +785,15 @@
   void decb(Register dst);
   void decb(const Operand& dst);
 
+  // Lock prefix.
+  void lock();
+
   void xchgb(Register reg, const Operand& op);
   void xchgw(Register reg, const Operand& op);
 
+  void cmpxchgb(const Operand& dst, Register src);
+  void cmpxchgw(const Operand& dst, Register src);
+
   // Sign-extends rax into rdx:rax.
   void cqo();
   // Sign-extends eax into edx:eax.
@@ -1149,19 +1152,55 @@
   void movmskpd(Register dst, XMMRegister src);
 
   void punpckldq(XMMRegister dst, XMMRegister src);
+  void punpckldq(XMMRegister dst, const Operand& src);
   void punpckhdq(XMMRegister dst, XMMRegister src);
 
   // SSE 4.1 instruction
+  void insertps(XMMRegister dst, XMMRegister src, byte imm8);
   void extractps(Register dst, XMMRegister src, byte imm8);
-
   void pextrd(Register dst, XMMRegister src, int8_t imm8);
-
   void pinsrd(XMMRegister dst, Register src, int8_t imm8);
   void pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
 
   void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
   void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
 
+  void cmpps(XMMRegister dst, XMMRegister src, int8_t cmp);
+  void cmpeqps(XMMRegister dst, XMMRegister src);
+  void cmpltps(XMMRegister dst, XMMRegister src);
+  void cmpleps(XMMRegister dst, XMMRegister src);
+  void cmpneqps(XMMRegister dst, XMMRegister src);
+  void cmpnltps(XMMRegister dst, XMMRegister src);
+  void cmpnleps(XMMRegister dst, XMMRegister src);
+
+  void minps(XMMRegister dst, XMMRegister src);
+  void minps(XMMRegister dst, const Operand& src);
+  void maxps(XMMRegister dst, XMMRegister src);
+  void maxps(XMMRegister dst, const Operand& src);
+  void rcpps(XMMRegister dst, XMMRegister src);
+  void rcpps(XMMRegister dst, const Operand& src);
+  void rsqrtps(XMMRegister dst, XMMRegister src);
+  void rsqrtps(XMMRegister dst, const Operand& src);
+  void sqrtps(XMMRegister dst, XMMRegister src);
+  void sqrtps(XMMRegister dst, const Operand& src);
+  void movups(XMMRegister dst, XMMRegister src);
+  void movups(XMMRegister dst, const Operand& src);
+  void movups(const Operand& dst, XMMRegister src);
+  void paddd(XMMRegister dst, XMMRegister src);
+  void paddd(XMMRegister dst, const Operand& src);
+  void psubd(XMMRegister dst, XMMRegister src);
+  void psubd(XMMRegister dst, const Operand& src);
+  void pmulld(XMMRegister dst, XMMRegister src);
+  void pmulld(XMMRegister dst, const Operand& src);
+  void pmuludq(XMMRegister dst, XMMRegister src);
+  void pmuludq(XMMRegister dst, const Operand& src);
+  void psrldq(XMMRegister dst, uint8_t shift);
+  void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
+  void cvtps2dq(XMMRegister dst, XMMRegister src);
+  void cvtps2dq(XMMRegister dst, const Operand& src);
+  void cvtdq2ps(XMMRegister dst, XMMRegister src);
+  void cvtdq2ps(XMMRegister dst, const Operand& src);
+
   // AVX instruction
   void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
     vfmasd(0x99, dst, src1, src2);
@@ -1938,6 +1977,7 @@
   void emit_sse_operand(Register reg, const Operand& adr);
   void emit_sse_operand(XMMRegister dst, Register src);
   void emit_sse_operand(Register dst, XMMRegister src);
+  void emit_sse_operand(XMMRegister dst);
 
   // Emit machine code for one of the operations ADD, ADC, SUB, SBC,
   // AND, OR, XOR, or CMP.  The encodings of these operations are all
@@ -2054,6 +2094,11 @@
     immediate_arithmetic_op(0x7, dst, src, size);
   }
 
+  // Compare {al,ax,eax,rax} with dst.  If equal, set ZF and write src into
+  // dst. Otherwise clear ZF and write dst into {al,ax,eax,rax}.  This
+  // operation is only atomic if prefixed by the lock instruction.
+  void emit_cmpxchg(const Operand& dst, Register src, int size);
+
   void emit_dec(Register dst, int size);
   void emit_dec(const Operand& dst, int size);
 
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 419ee0f..fb43324 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -15,10 +15,7 @@
 
 #define __ ACCESS_MASM(masm)
 
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
-                                CFunctionId id,
-                                BuiltinExtraArguments extra_args) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
   // ----------- S t a t e -------------
   //  -- rax                 : number of arguments excluding receiver
   //  -- rdi                 : target
@@ -37,20 +34,13 @@
   // ordinary functions).
   __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
-  // Insert extra arguments.
-  int num_extra_args = 0;
-  if (extra_args != BuiltinExtraArguments::kNone) {
-    __ PopReturnAddressTo(kScratchRegister);
-    if (extra_args & BuiltinExtraArguments::kTarget) {
-      ++num_extra_args;
-      __ Push(rdi);
-    }
-    if (extra_args & BuiltinExtraArguments::kNewTarget) {
-      ++num_extra_args;
-      __ Push(rdx);
-    }
-    __ PushReturnAddressFrom(kScratchRegister);
-  }
+  // Unconditionally insert the target and new target as extra arguments. They
+  // will be used by stack frame iterators when constructing the stack trace.
+  const int num_extra_args = 2;
+  __ PopReturnAddressTo(kScratchRegister);
+  __ Push(rdi);
+  __ Push(rdx);
+  __ PushReturnAddressFrom(kScratchRegister);
 
   // JumpToExternalReference expects rax to contain the number of arguments
   // including the receiver and the extra arguments.
@@ -468,8 +458,8 @@
   __ AssertGeneratorObject(rbx);
 
   // Store input value into generator object.
-  __ movp(FieldOperand(rbx, JSGeneratorObject::kInputOffset), rax);
-  __ RecordWriteField(rbx, JSGeneratorObject::kInputOffset, rax, rcx,
+  __ movp(FieldOperand(rbx, JSGeneratorObject::kInputOrDebugPosOffset), rax);
+  __ RecordWriteField(rbx, JSGeneratorObject::kInputOrDebugPosOffset, rax, rcx,
                       kDontSaveFPRegs);
 
   // Store resume mode into generator object.
@@ -480,23 +470,23 @@
   __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
 
   // Flood function if we are stepping.
-  Label skip_flooding;
-  ExternalReference step_in_enabled =
-      ExternalReference::debug_step_in_enabled_address(masm->isolate());
-  Operand step_in_enabled_operand = masm->ExternalOperand(step_in_enabled);
-  __ cmpb(step_in_enabled_operand, Immediate(0));
-  __ j(equal, &skip_flooding);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Push(rbx);
-    __ Push(rdx);
-    __ Push(rdi);
-    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
-    __ Pop(rdx);
-    __ Pop(rbx);
-    __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
-  }
-  __ bind(&skip_flooding);
+  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+  Label stepping_prepared;
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(masm->isolate());
+  Operand last_step_action_operand = masm->ExternalOperand(last_step_action);
+  STATIC_ASSERT(StepFrame > StepIn);
+  __ cmpb(last_step_action_operand, Immediate(StepIn));
+  __ j(greater_equal, &prepare_step_in_if_stepping);
+
+  // Flood function if we need to continue stepping in the suspended generator.
+  ExternalReference debug_suspended_generator =
+      ExternalReference::debug_suspended_generator_address(masm->isolate());
+  Operand debug_suspended_generator_operand =
+      masm->ExternalOperand(debug_suspended_generator);
+  __ cmpp(rbx, debug_suspended_generator_operand);
+  __ j(equal, &prepare_step_in_suspended_generator);
+  __ bind(&stepping_prepared);
 
   // Pop return address.
   __ PopReturnAddressTo(rax);
@@ -596,6 +586,51 @@
     __ movp(rax, rbx);  // Continuation expects generator object in rax.
     __ jmp(rdx);
   }
+
+  __ bind(&prepare_step_in_if_stepping);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(rbx);
+    __ Push(rdx);
+    __ Push(rdi);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(rdx);
+    __ Pop(rbx);
+    __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+  }
+  __ jmp(&stepping_prepared);
+
+  __ bind(&prepare_step_in_suspended_generator);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(rbx);
+    __ Push(rdx);
+    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+    __ Pop(rdx);
+    __ Pop(rbx);
+    __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+  }
+  __ jmp(&stepping_prepared);
+}
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+                                  Register scratch2) {
+  Register args_count = scratch1;
+  Register return_pc = scratch2;
+
+  // Get the arguments + receiver count.
+  __ movp(args_count,
+          Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ movl(args_count,
+          FieldOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ leave();
+
+  // Drop receiver + arguments.
+  __ PopReturnAddressTo(return_pc);
+  __ addp(rsp, args_count);
+  __ PushReturnAddressFrom(return_pc);
 }
 
 // Generate code for entering a JS function with the interpreter.
@@ -702,18 +737,7 @@
   masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
 
   // The return value is in rax.
-
-  // Get the arguments + reciever count.
-  __ movp(rbx, Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ movl(rbx, FieldOperand(rbx, BytecodeArray::kParameterSizeOffset));
-
-  // Leave the frame (also dropping the register file).
-  __ leave();
-
-  // Drop receiver + arguments and return.
-  __ PopReturnAddressTo(rcx);
-  __ addp(rsp, rbx);
-  __ PushReturnAddressFrom(rcx);
+  LeaveInterpreterFrame(masm, rbx, rcx);
   __ ret(0);
 
   // Load debug copy of the bytecode array.
@@ -737,6 +761,31 @@
   __ jmp(rcx);
 }
 
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+  // Save the function and context for call to CompileBaseline.
+  __ movp(rdi, Operand(rbp, StandardFrameConstants::kFunctionOffset));
+  __ movp(kContextRegister,
+          Operand(rbp, StandardFrameConstants::kContextOffset));
+
+  // Leave the frame before recompiling for baseline so that we don't count as
+  // an activation on the stack.
+  LeaveInterpreterFrame(masm, rbx, rcx);
+
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Push return value.
+    __ Push(rax);
+
+    // Push function as argument and compile for baseline.
+    __ Push(rdi);
+    __ CallRuntime(Runtime::kCompileBaseline);
+
+    // Restore return value.
+    __ Pop(rax);
+  }
+  __ ret(0);
+}
+
 static void Generate_InterpreterPushArgs(MacroAssembler* masm,
                                          bool push_receiver) {
   // ----------- S t a t e -------------
@@ -904,13 +953,30 @@
   const int bailout_id = BailoutId::None().ToInt();
   __ cmpl(temp, Immediate(bailout_id));
   __ j(not_equal, &loop_bottom);
+
   // Literals available?
+  Label got_literals, maybe_cleared_weakcell;
   __ movp(temp, FieldOperand(map, index, times_pointer_size,
                              SharedFunctionInfo::kOffsetToPreviousLiterals));
+  // temp contains either a WeakCell pointing to the literals array or the
+  // literals array directly.
+  STATIC_ASSERT(WeakCell::kValueOffset == FixedArray::kLengthOffset);
+  __ movp(r15, FieldOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(r15, &maybe_cleared_weakcell);
+  // r15 is a pointer, therefore temp is a WeakCell pointing to a literals
+  // array.
   __ movp(temp, FieldOperand(temp, WeakCell::kValueOffset));
-  __ JumpIfSmi(temp, &gotta_call_runtime);
+  __ jmp(&got_literals);
+
+  // r15 is a smi. If it's 0, then we are looking at a cleared WeakCell
+  // around the literals array, and we should visit the runtime. If it's > 0,
+  // then temp already contains the literals array.
+  __ bind(&maybe_cleared_weakcell);
+  __ cmpp(r15, Immediate(0));
+  __ j(equal, &gotta_call_runtime);
 
   // Save the literals in the closure.
+  __ bind(&got_literals);
   __ movp(FieldOperand(closure, JSFunction::kLiteralsOffset), temp);
   __ movp(r15, index);
   __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r15,
@@ -1169,6 +1235,9 @@
 void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
                                                int field_index) {
   // ----------- S t a t e -------------
+  //  -- rax    : number of arguments
+  //  -- rdi    : function
+  //  -- rsi    : context
   //  -- rsp[0] : return address
   //  -- rsp[8] : receiver
   // -----------------------------------
@@ -1210,7 +1279,11 @@
   __ bind(&receiver_not_date);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    __ EnterFrame(StackFrame::INTERNAL);
+    __ Push(rbp);
+    __ Move(rbp, rsp);
+    __ Push(rsi);
+    __ Push(rdi);
+    __ Push(Immediate(0));
     __ CallRuntime(Runtime::kThrowNotDateError);
   }
 }
@@ -1561,6 +1634,8 @@
 void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
   // ----------- S t a t e -------------
   //  -- rax                 : number of arguments
+  //  -- rdi                 : function
+  //  -- rsi                 : context
   //  -- rsp[0]              : return address
   //  -- rsp[(argc - n) * 8] : arg[n] (zero-based)
   //  -- rsp[(argc + 1) * 8] : receiver
@@ -1588,27 +1663,32 @@
     __ movp(rbx, Operand(rsp, rcx, times_pointer_size, 0));
 
     // Load the double value of the parameter into xmm1, maybe converting the
-    // parameter to a number first using the ToNumberStub if necessary.
+    // parameter to a number first using the ToNumber builtin if necessary.
     Label convert, convert_smi, convert_number, done_convert;
     __ bind(&convert);
     __ JumpIfSmi(rbx, &convert_smi);
     __ JumpIfRoot(FieldOperand(rbx, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex, &convert_number);
     {
-      // Parameter is not a Number, use the ToNumberStub to convert it.
-      FrameScope scope(masm, StackFrame::INTERNAL);
+      // Parameter is not a Number, use the ToNumber builtin to convert it.
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ Push(rbp);
+      __ Move(rbp, rsp);
+      __ Push(rsi);
+      __ Push(rdi);
       __ Integer32ToSmi(rax, rax);
       __ Integer32ToSmi(rcx, rcx);
       __ Push(rax);
       __ Push(rcx);
       __ Push(rdx);
       __ movp(rax, rbx);
-      ToNumberStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
       __ movp(rbx, rax);
       __ Pop(rdx);
       __ Pop(rcx);
       __ Pop(rax);
+      __ Pop(rdi);
+      __ Pop(rsi);
       {
         // Restore the double accumulator value (xmm0).
         Label restore_smi, done_restore;
@@ -1621,6 +1701,7 @@
       }
       __ SmiToInteger32(rcx, rcx);
       __ SmiToInteger32(rax, rax);
+      __ leave();
     }
     __ jmp(&convert);
     __ bind(&convert_number);
@@ -1694,8 +1775,7 @@
   }
 
   // 2a. Convert the first argument to a number.
-  ToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
+  __ Jump(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
 
   // 2b. No arguments, return +0 (already in rax).
   __ bind(&no_arguments);
@@ -1746,8 +1826,7 @@
       __ Push(rdx);
       __ Push(rdi);
       __ Move(rax, rbx);
-      ToNumberStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
       __ Move(rbx, rax);
       __ Pop(rdi);
       __ Pop(rdx);
@@ -2001,6 +2080,81 @@
   __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
 }
 
+void Builtins::Generate_StringToNumber(MacroAssembler* masm) {
+  // The StringToNumber stub takes one argument in rax.
+  __ AssertString(rax);
+
+  // Check if string has a cached array index.
+  Label runtime;
+  __ testl(FieldOperand(rax, String::kHashFieldOffset),
+           Immediate(String::kContainsCachedArrayIndexMask));
+  __ j(not_zero, &runtime, Label::kNear);
+  __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
+  __ IndexFromHash(rax, rax);
+  __ Ret();
+
+  __ bind(&runtime);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    // Push argument.
+    __ Push(rax);
+    // We cannot use a tail call here because this builtin can also be called
+    // from wasm.
+    __ CallRuntime(Runtime::kStringToNumber);
+  }
+  __ Ret();
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in rax.
+  Label not_smi;
+  __ JumpIfNotSmi(rax, &not_smi, Label::kNear);
+  __ Ret();
+  __ bind(&not_smi);
+
+  Label not_heap_number;
+  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, &not_heap_number, Label::kNear);
+  __ Ret();
+  __ bind(&not_heap_number);
+
+  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+          RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_NonNumberToNumber(MacroAssembler* masm) {
+  // The NonNumberToNumber stub takes one argument in rax.
+  __ AssertNotNumber(rax);
+
+  Label not_string;
+  __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdi);
+  // rax: object
+  // rdi: object map
+  __ j(above_equal, &not_string, Label::kNear);
+  __ Jump(masm->isolate()->builtins()->StringToNumber(),
+          RelocInfo::CODE_TARGET);
+  __ bind(&not_string);
+
+  Label not_oddball;
+  __ CmpInstanceType(rdi, ODDBALL_TYPE);
+  __ j(not_equal, &not_oddball, Label::kNear);
+  __ movp(rax, FieldOperand(rax, Oddball::kToNumberOffset));
+  __ Ret();
+  __ bind(&not_oddball);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    // Push argument.
+    __ Push(rax);
+    // We cannot use a tail call here because this builtin can also be called
+    // from wasm.
+    __ CallRuntime(Runtime::kToNumber);
+  }
+  __ Ret();
+}
+
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax : actual number of arguments
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 602d3a0..b89438f 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -20,71 +20,29 @@
 namespace v8 {
 namespace internal {
 
+#define __ ACCESS_MASM(masm)
 
-static void InitializeArrayConstructorDescriptor(
-    Isolate* isolate, CodeStubDescriptor* descriptor,
-    int constant_stack_parameter_count) {
-  Address deopt_handler = Runtime::FunctionForId(
-      Runtime::kArrayConstructor)->entry;
-
-  if (constant_stack_parameter_count == 0) {
-    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  } else {
-    descriptor->Initialize(rax, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  }
+void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
+  __ popq(rcx);
+  __ movq(MemOperand(rsp, rax, times_8, 0), rdi);
+  __ pushq(rdi);
+  __ pushq(rbx);
+  __ pushq(rcx);
+  __ addq(rax, Immediate(3));
+  __ TailCallRuntime(Runtime::kNewArray);
 }
 
-
-static void InitializeInternalArrayConstructorDescriptor(
-    Isolate* isolate, CodeStubDescriptor* descriptor,
-    int constant_stack_parameter_count) {
-  Address deopt_handler = Runtime::FunctionForId(
-      Runtime::kInternalArrayConstructor)->entry;
-
-  if (constant_stack_parameter_count == 0) {
-    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  } else {
-    descriptor->Initialize(rax, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  }
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
   descriptor->Initialize(rax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
 }
 
-void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+void FastFunctionBindStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
+  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
+  descriptor->Initialize(rax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
 }
 
-
-void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-
 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                                ExternalReference miss) {
   // Update the static counter each time a new code stub is generated.
@@ -174,7 +132,7 @@
 
     bool stash_exponent_copy = !input_reg.is(rsp);
     __ movl(scratch1, mantissa_operand);
-    __ Movsd(xmm0, mantissa_operand);
+    __ Movsd(kScratchDoubleReg, mantissa_operand);
     __ movl(rcx, exponent_operand);
     if (stash_exponent_copy) __ pushq(rcx);
 
@@ -194,7 +152,7 @@
     __ jmp(&check_negative);
 
     __ bind(&process_64_bits);
-    __ Cvttsd2siq(result_reg, xmm0);
+    __ Cvttsd2siq(result_reg, kScratchDoubleReg);
     __ jmp(&done, Label::kNear);
 
     // If the double was negative, negate the integer result.
@@ -547,7 +505,6 @@
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
                                           &miss,  // When index out of range.
-                                          STRING_INDEX_IS_ARRAY_INDEX,
                                           RECEIVER_IS_STRING);
   char_at_generator.GenerateFast(masm);
   __ ret(0);
@@ -1343,8 +1300,8 @@
   // rdx : slot in feedback vector (Smi)
   // rdi : the function to call
   Isolate* isolate = masm->isolate();
-  Label initialize, done, miss, megamorphic, not_array_function,
-      done_no_smi_convert;
+  Label initialize, done, miss, megamorphic, not_array_function;
+  Label done_initialize_count, done_increment_count;
 
   // Load the cache state into r11.
   __ SmiToInteger32(rdx, rdx);
@@ -1358,7 +1315,7 @@
   // type-feedback-vector.h).
   Label check_allocation_site;
   __ cmpp(rdi, FieldOperand(r11, WeakCell::kValueOffset));
-  __ j(equal, &done, Label::kFar);
+  __ j(equal, &done_increment_count, Label::kFar);
   __ CompareRoot(r11, Heap::kmegamorphic_symbolRootIndex);
   __ j(equal, &done, Label::kFar);
   __ CompareRoot(FieldOperand(r11, HeapObject::kMapOffset),
@@ -1382,7 +1339,7 @@
   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r11);
   __ cmpp(rdi, r11);
   __ j(not_equal, &megamorphic);
-  __ jmp(&done);
+  __ jmp(&done_increment_count);
 
   __ bind(&miss);
 
@@ -1408,17 +1365,29 @@
 
   CreateAllocationSiteStub create_stub(isolate);
   CallStubInRecordCallTarget(masm, &create_stub);
-  __ jmp(&done_no_smi_convert);
+  __ jmp(&done_initialize_count);
 
   __ bind(&not_array_function);
   CreateWeakCellStub weak_cell_stub(isolate);
   CallStubInRecordCallTarget(masm, &weak_cell_stub);
-  __ jmp(&done_no_smi_convert);
+
+  __ bind(&done_initialize_count);
+  // Initialize the call counter.
+  __ SmiToInteger32(rdx, rdx);
+  __ Move(FieldOperand(rbx, rdx, times_pointer_size,
+                       FixedArray::kHeaderSize + kPointerSize),
+          Smi::FromInt(1));
+  __ jmp(&done);
+
+  __ bind(&done_increment_count);
+
+  // Increment the call count for monomorphic function calls.
+  __ SmiAddConstant(FieldOperand(rbx, rdx, times_pointer_size,
+                                 FixedArray::kHeaderSize + kPointerSize),
+                    Smi::FromInt(1));
 
   __ bind(&done);
   __ Integer32ToSmi(rdx, rdx);
-
-  __ bind(&done_no_smi_convert);
 }
 
 
@@ -1479,7 +1448,7 @@
   // Increment the call count for monomorphic function calls.
   __ SmiAddConstant(FieldOperand(rbx, rdx, times_pointer_size,
                                  FixedArray::kHeaderSize + kPointerSize),
-                    Smi::FromInt(CallICNexus::kCallCountIncrement));
+                    Smi::FromInt(1));
 
   __ movp(rbx, rcx);
   __ movp(rdx, rdi);
@@ -1529,7 +1498,7 @@
   // Increment the call count for monomorphic function calls.
   __ SmiAddConstant(FieldOperand(rbx, rdx, times_pointer_size,
                                  FixedArray::kHeaderSize + kPointerSize),
-                    Smi::FromInt(CallICNexus::kCallCountIncrement));
+                    Smi::FromInt(1));
 
   __ bind(&call_function);
   __ Set(rax, argc);
@@ -1599,7 +1568,7 @@
   // Initialize the call counter.
   __ Move(FieldOperand(rbx, rdx, times_pointer_size,
                        FixedArray::kHeaderSize + kPointerSize),
-          Smi::FromInt(CallICNexus::kCallCountIncrement));
+          Smi::FromInt(1));
 
   // Store the function. Use a stub since we need a frame for allocation.
   // rbx - vector
@@ -1656,7 +1625,7 @@
   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
   // It is important that the store buffer overflow stubs are generated first.
-  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
   CreateWeakCellStub::GenerateAheadOfTime(isolate);
   BinaryOpICStub::GenerateAheadOfTime(isolate);
@@ -2056,13 +2025,7 @@
   }
   __ Push(object_);
   __ Push(index_);  // Consumed by runtime conversion function.
-  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
-    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
-  } else {
-    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
-    // NumberToSmi discards numbers that are not exact integers.
-    __ CallRuntime(Runtime::kNumberToSmi);
-  }
+  __ CallRuntime(Runtime::kNumberToSmi);
   if (!index_.is(rax)) {
     // Save the conversion result before the pop instructions below
     // have a chance to overwrite it.
@@ -2383,78 +2346,12 @@
   // rcx: sub string length (smi)
   // rdx: from index (smi)
   StringCharAtGenerator generator(rax, rdx, rcx, rax, &runtime, &runtime,
-                                  &runtime, STRING_INDEX_IS_NUMBER,
-                                  RECEIVER_IS_STRING);
+                                  &runtime, RECEIVER_IS_STRING);
   generator.GenerateFast(masm);
   __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
   generator.SkipSlow(masm, &runtime);
 }
 
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in rax.
-  Label not_smi;
-  __ JumpIfNotSmi(rax, &not_smi, Label::kNear);
-  __ Ret();
-  __ bind(&not_smi);
-
-  Label not_heap_number;
-  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &not_heap_number, Label::kNear);
-  __ Ret();
-  __ bind(&not_heap_number);
-
-  NonNumberToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-}
-
-void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
-  // The NonNumberToNumber stub takes one argument in rax.
-  __ AssertNotNumber(rax);
-
-  Label not_string;
-  __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdi);
-  // rax: object
-  // rdi: object map
-  __ j(above_equal, &not_string, Label::kNear);
-  StringToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_string);
-
-  Label not_oddball;
-  __ CmpInstanceType(rdi, ODDBALL_TYPE);
-  __ j(not_equal, &not_oddball, Label::kNear);
-  __ movp(rax, FieldOperand(rax, Oddball::kToNumberOffset));
-  __ Ret();
-  __ bind(&not_oddball);
-
-  __ PopReturnAddressTo(rcx);     // Pop return address.
-  __ Push(rax);                   // Push argument.
-  __ PushReturnAddressFrom(rcx);  // Push return address.
-  __ TailCallRuntime(Runtime::kToNumber);
-}
-
-void StringToNumberStub::Generate(MacroAssembler* masm) {
-  // The StringToNumber stub takes one argument in rax.
-  __ AssertString(rax);
-
-  // Check if string has a cached array index.
-  Label runtime;
-  __ testl(FieldOperand(rax, String::kHashFieldOffset),
-           Immediate(String::kContainsCachedArrayIndexMask));
-  __ j(not_zero, &runtime, Label::kNear);
-  __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
-  __ IndexFromHash(rax, rax);
-  __ Ret();
-
-  __ bind(&runtime);
-  __ PopReturnAddressTo(rcx);     // Pop return address.
-  __ Push(rax);                   // Push argument.
-  __ PushReturnAddressFrom(rcx);  // Push return address.
-  __ TailCallRuntime(Runtime::kStringToNumber);
-}
-
 void ToStringStub::Generate(MacroAssembler* masm) {
   // The ToString stub takes one argument in rax.
   Label is_number;
@@ -2489,7 +2386,6 @@
   __ TailCallRuntime(Runtime::kToString);
 }
 
-
 void ToNameStub::Generate(MacroAssembler* masm) {
   // The ToName stub takes one argument in rax.
   Label is_number;
@@ -2672,7 +2568,7 @@
   // Load rcx with the allocation site.  We stick an undefined dummy value here
   // and replace it with the real allocation site later when we instantiate this
   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
-  __ Move(rcx, handle(isolate()->heap()->undefined_value()));
+  __ Move(rcx, isolate()->factory()->undefined_value());
 
   // Make sure that we actually patched the allocation site.
   if (FLAG_debug_code) {
@@ -3482,14 +3378,14 @@
 
 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  LoadICStub stub(isolate(), state());
+  LoadICStub stub(isolate());
   stub.GenerateForTrampoline(masm);
 }
 
 
 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  KeyedLoadICStub stub(isolate(), state());
+  KeyedLoadICStub stub(isolate());
   stub.GenerateForTrampoline(masm);
 }
 
@@ -3970,9 +3866,6 @@
   // rdi - constructor?
   // rsp[0] - return address
   // rsp[8] - last argument
-  Handle<Object> undefined_sentinel(
-      masm->isolate()->heap()->undefined_value(),
-      masm->isolate());
 
   Label normal_sequence;
   if (mode == DONT_OVERRIDE) {
@@ -4063,19 +3956,14 @@
   }
 }
 
-
-void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
       isolate);
   ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
       isolate);
-  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
-      isolate);
-}
+  ArrayNArgumentsConstructorStub stub(isolate);
+  stub.GetCode();
 
-
-void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
-    Isolate* isolate) {
   ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
   for (int i = 0; i < 2; i++) {
     // For internal arrays we only need a few things
@@ -4083,8 +3971,6 @@
     stubh1.GetCode();
     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
     stubh2.GetCode();
-    InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
-    stubh3.GetCode();
   }
 }
 
@@ -4104,13 +3990,15 @@
     CreateArrayDispatchOneArgument(masm, mode);
 
     __ bind(&not_one_case);
-    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+    ArrayNArgumentsConstructorStub stub(masm->isolate());
+    __ TailCallStub(&stub);
   } else if (argument_count() == NONE) {
     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   } else if (argument_count() == ONE) {
     CreateArrayDispatchOneArgument(masm, mode);
   } else if (argument_count() == MORE_THAN_ONE) {
-    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+    ArrayNArgumentsConstructorStub stub(masm->isolate());
+    __ TailCallStub(&stub);
   } else {
     UNREACHABLE();
   }
@@ -4229,7 +4117,7 @@
   __ TailCallStub(&stub1);
 
   __ bind(&not_one_case);
-  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+  ArrayNArgumentsConstructorStub stubN(isolate());
   __ TailCallStub(&stubN);
 }
 
@@ -4494,6 +4382,7 @@
                              1 * kPointerSize));
 
     // ----------- S t a t e -------------
+    //  -- rdi    : function
     //  -- rsi    : context
     //  -- rax    : number of rest parameters
     //  -- rbx    : pointer to first rest parameters
@@ -4504,7 +4393,7 @@
     Label allocate, done_allocate;
     __ leal(rcx, Operand(rax, times_pointer_size,
                          JSArray::kSize + FixedArray::kHeaderSize));
-    __ Allocate(rcx, rdx, rdi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
+    __ Allocate(rcx, rdx, r8, no_reg, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Compute the arguments.length in rdi.
@@ -4542,8 +4431,11 @@
     STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
     __ Ret();
 
-    // Fall back to %AllocateInNewSpace.
+    // Fall back to %AllocateInNewSpace (if not too big).
+    Label too_big_for_new_space;
     __ bind(&allocate);
+    __ cmpl(rcx, Immediate(Page::kMaxRegularHeapObjectSize));
+    __ j(greater, &too_big_for_new_space);
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
       __ Integer32ToSmi(rax, rax);
@@ -4558,6 +4450,13 @@
       __ SmiToInteger32(rax, rax);
     }
     __ jmp(&done_allocate);
+
+    // Fall back to %NewRestParameter.
+    __ bind(&too_big_for_new_space);
+    __ PopReturnAddressTo(kScratchRegister);
+    __ Push(rdi);
+    __ PushReturnAddressFrom(kScratchRegister);
+    __ TailCallRuntime(Runtime::kNewRestParameter);
   }
 }
 
@@ -4848,6 +4747,7 @@
   // ----------- S t a t e -------------
   //  -- rax    : number of arguments
   //  -- rbx    : pointer to the first argument
+  //  -- rdi    : function
   //  -- rsi    : context
   //  -- rsp[0] : return address
   // -----------------------------------
@@ -4856,7 +4756,7 @@
   Label allocate, done_allocate;
   __ leal(rcx, Operand(rax, times_pointer_size, JSStrictArgumentsObject::kSize +
                                                     FixedArray::kHeaderSize));
-  __ Allocate(rcx, rdx, rdi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
+  __ Allocate(rcx, rdx, r8, no_reg, &allocate, NO_ALLOCATION_FLAGS);
   __ bind(&done_allocate);
 
   // Compute the arguments.length in rdi.
@@ -4894,8 +4794,11 @@
   STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
   __ Ret();
 
-  // Fall back to %AllocateInNewSpace.
+  // Fall back to %AllocateInNewSpace (if not too big).
+  Label too_big_for_new_space;
   __ bind(&allocate);
+  __ cmpl(rcx, Immediate(Page::kMaxRegularHeapObjectSize));
+  __ j(greater, &too_big_for_new_space);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ Integer32ToSmi(rax, rax);
@@ -4910,37 +4813,13 @@
     __ SmiToInteger32(rax, rax);
   }
   __ jmp(&done_allocate);
-}
 
-
-void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
-  Register context_reg = rsi;
-  Register slot_reg = rbx;
-  Register result_reg = rax;
-  Label slow_case;
-
-  // Go up context chain to the script context.
-  for (int i = 0; i < depth(); ++i) {
-    __ movp(rdi, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
-    context_reg = rdi;
-  }
-
-  // Load the PropertyCell value at the specified slot.
-  __ movp(result_reg, ContextOperand(context_reg, slot_reg));
-  __ movp(result_reg, FieldOperand(result_reg, PropertyCell::kValueOffset));
-
-  // Check that value is not the_hole.
-  __ CompareRoot(result_reg, Heap::kTheHoleValueRootIndex);
-  __ j(equal, &slow_case, Label::kNear);
-  __ Ret();
-
-  // Fallback to the runtime.
-  __ bind(&slow_case);
-  __ Integer32ToSmi(slot_reg, slot_reg);
+  // Fall back to %NewStrictArguments.
+  __ bind(&too_big_for_new_space);
   __ PopReturnAddressTo(kScratchRegister);
-  __ Push(slot_reg);
-  __ Push(kScratchRegister);
-  __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+  __ Push(rdi);
+  __ PushReturnAddressFrom(kScratchRegister);
+  __ TailCallRuntime(Runtime::kNewStrictArguments);
 }
 
 
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index d4f8b29..a181377 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -295,8 +295,8 @@
                                   Register r2,
                                   Register r3) {
       for (int i = 0; i < Register::kNumRegisters; i++) {
-        Register candidate = Register::from_code(i);
-        if (candidate.IsAllocatable()) {
+        if (RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(i)) {
+          Register candidate = Register::from_code(i);
           if (candidate.is(rcx)) continue;
           if (candidate.is(r1)) continue;
           if (candidate.is(r2)) continue;
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 114cbdc..911f3cb 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -32,38 +32,6 @@
 #define __ masm.
 
 
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
-  size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
-  if (buffer == nullptr) return nullptr;
-  ExternalReference::InitializeMathExpData();
-
-  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
-                      CodeObjectRequired::kNo);
-  // xmm0: raw double input.
-  XMMRegister input = xmm0;
-  XMMRegister result = xmm1;
-  __ pushq(rax);
-  __ pushq(rbx);
-
-  MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);
-
-  __ popq(rbx);
-  __ popq(rax);
-  __ Movsd(xmm0, result);
-  __ Ret();
-
-  CodeDesc desc;
-  masm.GetCode(&desc);
-  DCHECK(!RelocInfo::RequiresRelocation(desc));
-
-  Assembler::FlushICache(isolate, buffer, actual_size);
-  base::OS::ProtectCode(buffer, actual_size);
-  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
-}
-
-
 UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
   size_t actual_size;
   // Allocate buffer in executable space.
@@ -243,8 +211,9 @@
   // rbx: current element (smi-tagged)
   __ JumpIfNotSmi(rbx, &convert_hole);
   __ SmiToInteger32(rbx, rbx);
-  __ Cvtlsi2sd(xmm0, rbx);
-  __ Movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), xmm0);
+  __ Cvtlsi2sd(kScratchDoubleReg, rbx);
+  __ Movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
+           kScratchDoubleReg);
   __ jmp(&entry);
   __ bind(&convert_hole);
 
@@ -498,59 +467,6 @@
   __ bind(&done);
 }
 
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
-                                   XMMRegister input,
-                                   XMMRegister result,
-                                   XMMRegister double_scratch,
-                                   Register temp1,
-                                   Register temp2) {
-  DCHECK(!input.is(result));
-  DCHECK(!input.is(double_scratch));
-  DCHECK(!result.is(double_scratch));
-  DCHECK(!temp1.is(temp2));
-  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
-  DCHECK(!masm->serializer_enabled());  // External references not serializable.
-
-  Label done;
-
-  __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
-  __ Movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
-  __ Xorpd(result, result);
-  __ Ucomisd(double_scratch, input);
-  __ j(above_equal, &done);
-  __ Ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
-  __ Movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
-  __ j(above_equal, &done);
-  __ Movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
-  __ Movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
-  __ Mulsd(double_scratch, input);
-  __ Addsd(double_scratch, result);
-  __ Movq(temp2, double_scratch);
-  __ Subsd(double_scratch, result);
-  __ Movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
-  __ leaq(temp1, Operand(temp2, 0x1ff800));
-  __ andq(temp2, Immediate(0x7ff));
-  __ shrq(temp1, Immediate(11));
-  __ Mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
-  __ Move(kScratchRegister, ExternalReference::math_exp_log_table());
-  __ shlq(temp1, Immediate(52));
-  __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
-  __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
-  __ Subsd(double_scratch, input);
-  __ Movsd(input, double_scratch);
-  __ Subsd(result, double_scratch);
-  __ Mulsd(input, double_scratch);
-  __ Mulsd(result, input);
-  __ Movq(input, temp1);
-  __ Mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
-  __ Subsd(result, double_scratch);
-  __ Addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
-  __ Mulsd(result, input);
-
-  __ bind(&done);
-}
-
 #undef __
 
 
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 1403781..62945f7 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -28,20 +28,6 @@
 };
 
 
-class MathExpGenerator : public AllStatic {
- public:
-  static void EmitMathExp(MacroAssembler* masm,
-                          XMMRegister input,
-                          XMMRegister result,
-                          XMMRegister double_scratch,
-                          Register temp1,
-                          Register temp2);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
-
 enum StackArgumentsAccessorReceiverMode {
   ARGUMENTS_CONTAIN_RECEIVER,
   ARGUMENTS_DONT_CONTAIN_RECEIVER
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 9d70c32..35da7a2 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -116,8 +116,7 @@
   const int kDoubleRegsSize = kDoubleSize * XMMRegister::kMaxNumRegisters;
   __ subp(rsp, Immediate(kDoubleRegsSize));
 
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
     int code = config->GetAllocatableDoubleCode(i);
     XMMRegister xmm_reg = XMMRegister::from_code(code);
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 7126b89..d679898 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -142,19 +142,18 @@
   SHORT_IMMEDIATE_INSTR
 };
 
-
 enum Prefixes {
   ESCAPE_PREFIX = 0x0F,
   OPERAND_SIZE_OVERRIDE_PREFIX = 0x66,
   ADDRESS_SIZE_OVERRIDE_PREFIX = 0x67,
   VEX3_PREFIX = 0xC4,
   VEX2_PREFIX = 0xC5,
+  LOCK_PREFIX = 0xF0,
   REPNE_PREFIX = 0xF2,
   REP_PREFIX = 0xF3,
   REPEQ_PREFIX = REP_PREFIX
 };
 
-
 struct InstructionDesc {
   const char* mnem;
   InstructionType type;
@@ -1514,7 +1513,16 @@
   if (operand_size_ == 0x66) {
     // 0x66 0x0F prefix.
     int mod, regop, rm;
-    if (opcode == 0x3A) {
+    if (opcode == 0x38) {
+      byte third_byte = *current;
+      current = data + 3;
+      if (third_byte == 0x40) {
+        // pmulld xmm, xmm/m128
+        get_modrm(*current, &mod, &regop, &rm);
+        AppendToBuffer("pmulld %s,", NameOfXMMRegister(regop));
+        current += PrintRightXMMOperand(current);
+      }
+    } else if (opcode == 0x3A) {
       byte third_byte = *current;
       current = data + 3;
       if (third_byte == 0x17) {
@@ -1537,11 +1545,18 @@
         AppendToBuffer(",0x%x", (*current) & 3);
         current += 1;
       } else if (third_byte == 0x16) {
-        get_modrm(*current, &mod, &rm, &regop);
+        get_modrm(*current, &mod, &regop, &rm);
         AppendToBuffer("pextrd ");  // reg/m32, xmm, imm8
         current += PrintRightOperand(current);
         AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
         current += 1;
+      } else if (third_byte == 0x21) {
+        get_modrm(*current, &mod, &regop, &rm);
+        // insertps xmm, xmm/m32, imm8
+        AppendToBuffer("insertps %s,", NameOfXMMRegister(regop));
+        current += PrintRightXMMOperand(current);
+        AppendToBuffer(",0x%x", (*current) & 3);
+        current += 1;
       } else if (third_byte == 0x22) {
         get_modrm(*current, &mod, &regop, &rm);
         AppendToBuffer("pinsrd ");  // xmm, reg/m32, imm8
@@ -1597,6 +1612,11 @@
       } else if (opcode == 0x50) {
         AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
         current += PrintRightXMMOperand(current);
+      } else if (opcode == 0x70) {
+        AppendToBuffer("pshufd %s,", NameOfXMMRegister(regop));
+        current += PrintRightXMMOperand(current);
+        AppendToBuffer(",0x%x", *current);
+        current += 1;
       } else if (opcode == 0x72) {
         current += 1;
         AppendToBuffer("%s %s,%d", (regop == 6) ? "pslld" : "psrld",
@@ -1607,6 +1627,8 @@
         AppendToBuffer("%s %s,%d", (regop == 6) ? "psllq" : "psrlq",
                        NameOfXMMRegister(rm), *current & 0x7f);
         current += 1;
+      } else if (opcode == 0xB1) {
+        current += PrintOperands("cmpxchg", OPER_REG_OP_ORDER, current);
       } else {
         const char* mnemonic = "?";
         if (opcode == 0x54) {
@@ -1615,6 +1637,8 @@
           mnemonic = "orpd";
         } else  if (opcode == 0x57) {
           mnemonic = "xorpd";
+        } else if (opcode == 0x5B) {
+          mnemonic = "cvtps2dq";
         } else if (opcode == 0x2E) {
           mnemonic = "ucomisd";
         } else if (opcode == 0x2F) {
@@ -1625,6 +1649,12 @@
           mnemonic = "punpckldq";
         } else if (opcode == 0x6A) {
           mnemonic = "punpckhdq";
+        } else if (opcode == 0xF4) {
+          mnemonic = "pmuludq";
+        } else if (opcode == 0xFA) {
+          mnemonic = "psubd";
+        } else if (opcode == 0xFE) {
+          mnemonic = "paddd";
         } else {
           UnimplementedInstruction();
         }
@@ -1766,6 +1796,19 @@
     } else {
       UnimplementedInstruction();
     }
+  } else if (opcode == 0x10 || opcode == 0x11) {
+    // movups xmm, xmm/m128
+    // movups xmm/m128, xmm
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    AppendToBuffer("movups ");
+    if (opcode == 0x11) {
+      current += PrintRightXMMOperand(current);
+      AppendToBuffer(",%s", NameOfXMMRegister(regop));
+    } else {
+      AppendToBuffer("%s,", NameOfXMMRegister(regop));
+      current += PrintRightXMMOperand(current);
+    }
   } else if (opcode == 0x1F) {
     // NOP
     int mod, regop, rm;
@@ -1812,29 +1855,28 @@
     byte_size_operand_ = idesc.byte_size_operation;
     current += PrintOperands(idesc.mnem, idesc.op_order_, current);
 
-  } else if (opcode >= 0x53 && opcode <= 0x5F) {
+  } else if (opcode >= 0x51 && opcode <= 0x5F) {
     const char* const pseudo_op[] = {
-      "rcpps",
-      "andps",
-      "andnps",
-      "orps",
-      "xorps",
-      "addps",
-      "mulps",
-      "cvtps2pd",
-      "cvtdq2ps",
-      "subps",
-      "minps",
-      "divps",
-      "maxps",
+        "sqrtps",   "rsqrtps", "rcpps", "andps", "andnps",
+        "orps",     "xorps",   "addps", "mulps", "cvtps2pd",
+        "cvtdq2ps", "subps",   "minps", "divps", "maxps",
     };
     int mod, regop, rm;
     get_modrm(*current, &mod, &regop, &rm);
-    AppendToBuffer("%s %s,",
-                   pseudo_op[opcode - 0x53],
+    AppendToBuffer("%s %s,", pseudo_op[opcode - 0x51],
                    NameOfXMMRegister(regop));
     current += PrintRightXMMOperand(current);
 
+  } else if (opcode == 0xC2) {
+    // cmpps xmm, xmm/m128, imm8
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    const char* const pseudo_op[] = {"cmpeqps",    "cmpltps",  "cmpleps",
+                                     "cmpunordps", "cmpneqps", "cmpnltps",
+                                     "cmpnleps",   "cmpordps"};
+    AppendToBuffer("%s %s,%s", pseudo_op[current[1]], NameOfXMMRegister(regop),
+                   NameOfXMMRegister(rm));
+    current += 2;
   } else if (opcode == 0xC6) {
     // shufps xmm, xmm/m128, imm8
     int mod, regop, rm;
@@ -1843,7 +1885,6 @@
     current += PrintRightXMMOperand(current);
     AppendToBuffer(", %d", (*current) & 3);
     current += 1;
-
   } else if (opcode == 0x50) {
     // movmskps reg, xmm
     int mod, regop, rm;
@@ -1884,6 +1925,12 @@
     current += PrintRightOperand(current);
   } else if (opcode == 0x0B) {
     AppendToBuffer("ud2");
+  } else if (opcode == 0xB0 || opcode == 0xB1) {
+    // CMPXCHG.
+    if (opcode == 0xB0) {
+      byte_size_operand_ = true;
+    }
+    current += PrintOperands(mnemonic, OPER_REG_OP_ORDER, current);
   } else {
     UnimplementedInstruction();
   }
@@ -1926,6 +1973,9 @@
       return "shrd";
     case 0xAF:
       return "imul";
+    case 0xB0:
+    case 0xB1:
+      return "cmpxchg";
     case 0xB6:
       return "movzxb";
     case 0xB7:
@@ -1963,6 +2013,8 @@
       if (rex_w()) AppendToBuffer("REX.W ");
     } else if ((current & 0xFE) == 0xF2) {  // Group 1 prefix (0xF2 or 0xF3).
       group_1_prefix_ = current;
+    } else if (current == LOCK_PREFIX) {
+      AppendToBuffer("lock ");
     } else if (current == VEX3_PREFIX) {
       vex_byte0_ = current;
       vex_byte1_ = *(data + 1);
@@ -2427,7 +2479,7 @@
 
 
 const char* NameConverter::NameOfAddress(byte* addr) const {
-  v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+  v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
   return tmp_buffer_.start();
 }
 
@@ -2494,7 +2546,7 @@
     buffer[0] = '\0';
     byte* prev_pc = pc;
     pc += d.InstructionDecode(buffer, pc);
-    fprintf(f, "%p", prev_pc);
+    fprintf(f, "%p", static_cast<void*>(prev_pc));
     fprintf(f, "    ");
 
     for (byte* bp = prev_pc; bp < pc; bp++) {
diff --git a/src/x64/interface-descriptors-x64.cc b/src/x64/interface-descriptors-x64.cc
index e1e7f9c..a7cf120 100644
--- a/src/x64/interface-descriptors-x64.cc
+++ b/src/x64/interface-descriptors-x64.cc
@@ -11,6 +11,14 @@
 
 const Register CallInterfaceDescriptor::ContextRegister() { return rsi; }
 
+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
+    CallInterfaceDescriptorData* data, int register_parameter_count) {
+  const Register default_stub_registers[] = {rax, rbx, rcx, rdx, rdi};
+  CHECK_LE(static_cast<size_t>(register_parameter_count),
+           arraysize(default_stub_registers));
+  data->InitializePlatformSpecific(register_parameter_count,
+                                   default_stub_registers);
+}
 
 const Register LoadDescriptor::ReceiverRegister() { return rdx; }
 const Register LoadDescriptor::NameRegister() { return rcx; }
@@ -39,9 +47,6 @@
 const Register StoreTransitionDescriptor::MapRegister() { return rbx; }
 
 
-const Register LoadGlobalViaContextDescriptor::SlotRegister() { return rbx; }
-
-
 const Register StoreGlobalViaContextDescriptor::SlotRegister() { return rbx; }
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return rax; }
 
@@ -63,8 +68,6 @@
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return rax; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return rbx; }
 
-const Register HasPropertyDescriptor::ObjectRegister() { return rax; }
-const Register HasPropertyDescriptor::KeyRegister() { return rbx; }
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -250,43 +253,27 @@
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
   // rax -- number of arguments
   // rdi -- function
   // rbx -- allocation site with elements kind
-  Register registers[] = {rdi, rbx};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // stack param count needs (constructor pointer, and single argument)
   Register registers[] = {rdi, rbx, rax};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
-void InternalArrayConstructorConstantArgCountDescriptor::
-    InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   // register state
   // rax -- number of arguments
-  // rdi -- constructor function
-  Register registers[] = {rdi};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
+  // rdi -- function
+  // rbx -- allocation site with elements kind
+  Register registers[] = {rdi, rbx, rax};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
-void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // stack param count needs (constructor pointer, and single argument)
-  Register registers[] = {rdi, rax};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastArrayPushDescriptor::InitializePlatformSpecific(
+void VarArgFunctionDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // stack param count needs (arg count)
   Register registers[] = {rax};
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 2efb529..cd6b90c 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -3259,12 +3259,12 @@
     pinsrd(dst, src, imm8);
     return;
   }
-  Movd(xmm0, src);
+  Movd(kScratchDoubleReg, src);
   if (imm8 == 1) {
-    punpckldq(dst, xmm0);
+    punpckldq(dst, kScratchDoubleReg);
   } else {
     DCHECK_EQ(0, imm8);
-    Movss(dst, xmm0);
+    Movss(dst, kScratchDoubleReg);
   }
 }
 
@@ -3276,12 +3276,12 @@
     pinsrd(dst, src, imm8);
     return;
   }
-  Movd(xmm0, src);
+  Movd(kScratchDoubleReg, src);
   if (imm8 == 1) {
-    punpckldq(dst, xmm0);
+    punpckldq(dst, kScratchDoubleReg);
   } else {
     DCHECK_EQ(0, imm8);
-    Movss(dst, xmm0);
+    Movss(dst, kScratchDoubleReg);
   }
 }
 
@@ -3743,15 +3743,15 @@
 void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
                                            Register input_reg) {
   Label done;
-  Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
-  Cvttsd2siq(result_reg, xmm0);
+  Movsd(kScratchDoubleReg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  Cvttsd2siq(result_reg, kScratchDoubleReg);
   cmpq(result_reg, Immediate(1));
   j(no_overflow, &done, Label::kNear);
 
   // Slow case.
   if (input_reg.is(result_reg)) {
     subp(rsp, Immediate(kDoubleSize));
-    Movsd(MemOperand(rsp, 0), xmm0);
+    Movsd(MemOperand(rsp, 0), kScratchDoubleReg);
     SlowTruncateToI(result_reg, rsp, 0);
     addp(rsp, Immediate(kDoubleSize));
   } else {
@@ -3788,8 +3788,8 @@
                                Label* lost_precision, Label* is_nan,
                                Label* minus_zero, Label::Distance dst) {
   Cvttsd2si(result_reg, input_reg);
-  Cvtlsi2sd(xmm0, result_reg);
-  Ucomisd(xmm0, input_reg);
+  Cvtlsi2sd(kScratchDoubleReg, result_reg);
+  Ucomisd(kScratchDoubleReg, input_reg);
   j(not_equal, lost_precision, dst);
   j(parity_even, is_nan, dst);  // NaN.
   if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
@@ -4338,11 +4338,12 @@
                                              const ParameterCount& expected,
                                              const ParameterCount& actual) {
   Label skip_flooding;
-  ExternalReference step_in_enabled =
-      ExternalReference::debug_step_in_enabled_address(isolate());
-  Operand step_in_enabled_operand = ExternalOperand(step_in_enabled);
-  cmpb(step_in_enabled_operand, Immediate(0));
-  j(equal, &skip_flooding);
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(isolate());
+  Operand last_step_action_operand = ExternalOperand(last_step_action);
+  STATIC_ASSERT(StepFrame > StepIn);
+  cmpb(last_step_action_operand, Immediate(StepIn));
+  j(less, &skip_flooding);
   {
     FrameScope frame(this,
                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -4401,8 +4402,8 @@
 
 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
   movp(vector, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-  movp(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
-  movp(vector, FieldOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+  movp(vector, FieldOperand(vector, JSFunction::kLiteralsOffset));
+  movp(vector, FieldOperand(vector, LiteralsArray::kFeedbackVectorOffset));
 }
 
 
@@ -4483,8 +4484,7 @@
                 arg_stack_space * kRegisterSize;
     subp(rsp, Immediate(space));
     int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
-    const RegisterConfiguration* config =
-        RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+    const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
     for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
       DoubleRegister reg =
           DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
@@ -4530,8 +4530,7 @@
   // r15 : argv
   if (save_doubles) {
     int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
-    const RegisterConfiguration* config =
-        RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+    const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
     for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
       DoubleRegister reg =
           DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 013d0f1..b088c7d 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -34,8 +34,9 @@
 // Default scratch register used by MacroAssembler (and other code that needs
 // a spare register). The register isn't callee save, and not used by the
 // function calling convention.
-const Register kScratchRegister = { 10 };      // r10.
-const Register kRootRegister = { 13 };         // r13 (callee save).
+const Register kScratchRegister = {10};      // r10.
+const XMMRegister kScratchDoubleReg = {15};  // xmm15.
+const Register kRootRegister = {13};         // r13 (callee save).
 // Actual value of root register is offset from the root array's start
 // to take advantage of negitive 8-bit displacement values.
 const int kRootRegisterBias = 128;
diff --git a/src/x87/assembler-x87.cc b/src/x87/assembler-x87.cc
index 5cc783c..e7e4abe 100644
--- a/src/x87/assembler-x87.cc
+++ b/src/x87/assembler-x87.cc
@@ -106,36 +106,24 @@
   return Memory::Address_at(pc_);
 }
 
+Address RelocInfo::wasm_global_reference() {
+  DCHECK(IsWasmGlobalReference(rmode_));
+  return Memory::Address_at(pc_);
+}
+
 uint32_t RelocInfo::wasm_memory_size_reference() {
   DCHECK(IsWasmMemorySizeReference(rmode_));
   return Memory::uint32_at(pc_);
 }
 
-void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
-    ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
-  if (IsWasmMemoryReference(rmode_)) {
-    Address updated_reference;
-    DCHECK(old_base <= wasm_memory_reference() &&
-           wasm_memory_reference() < old_base + old_size);
-    updated_reference = new_base + (wasm_memory_reference() - old_base);
-    DCHECK(new_base <= updated_reference &&
-           updated_reference < new_base + new_size);
-    Memory::Address_at(pc_) = updated_reference;
-  } else if (IsWasmMemorySizeReference(rmode_)) {
-    uint32_t updated_size_reference;
-    DCHECK(wasm_memory_size_reference() <= old_size);
-    updated_size_reference =
-        new_size + (wasm_memory_size_reference() - old_size);
-    DCHECK(updated_size_reference <= new_size);
-    Memory::uint32_at(pc_) = updated_size_reference;
-  } else {
-    UNREACHABLE();
-  }
-  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    Assembler::FlushICache(isolate_, pc_, sizeof(int32_t));
-  }
+void RelocInfo::unchecked_update_wasm_memory_reference(
+    Address address, ICacheFlushMode flush_mode) {
+  Memory::Address_at(pc_) = address;
+}
+
+void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
+                                                  ICacheFlushMode flush_mode) {
+  Memory::uint32_at(pc_) = size;
 }
 
 // -----------------------------------------------------------------------------
@@ -259,6 +247,8 @@
   desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
   desc->origin = this;
   desc->constant_pool_size = 0;
+  desc->unwinding_info_size = 0;
+  desc->unwinding_info = nullptr;
 }
 
 
@@ -601,6 +591,33 @@
   emit_operand(reg, op);
 }
 
+void Assembler::lock() {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF0);
+}
+
+void Assembler::cmpxchg(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0xB1);
+  emit_operand(src, dst);
+}
+
+void Assembler::cmpxchg_b(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0xB0);
+  emit_operand(src, dst);
+}
+
+void Assembler::cmpxchg_w(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0xB1);
+  emit_operand(src, dst);
+}
+
 void Assembler::adc(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
   emit_arith(2, Operand(dst), Immediate(imm32));
@@ -1387,7 +1404,6 @@
 
 
 void Assembler::call(Label* L) {
-  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   if (L->is_bound()) {
     const int long_size = 5;
@@ -1405,7 +1421,6 @@
 
 
 void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
-  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   DCHECK(!RelocInfo::IsCodeTarget(rmode));
   EMIT(0xE8);
@@ -1424,7 +1439,6 @@
 
 
 void Assembler::call(const Operand& adr) {
-  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   EMIT(0xFF);
   emit_operand(edx, adr);
@@ -1439,7 +1453,6 @@
 void Assembler::call(Handle<Code> code,
                      RelocInfo::Mode rmode,
                      TypeFeedbackId ast_id) {
-  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   DCHECK(RelocInfo::IsCodeTarget(rmode)
       || rmode == RelocInfo::CODE_AGE_SEQUENCE);
diff --git a/src/x87/assembler-x87.h b/src/x87/assembler-x87.h
index eaf28e9..d4cde52 100644
--- a/src/x87/assembler-x87.h
+++ b/src/x87/assembler-x87.h
@@ -122,8 +122,6 @@
     Register r = {code};
     return r;
   }
-  const char* ToString();
-  bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
   bool is(Register reg) const { return reg_code == reg.reg_code; }
   int code() const {
@@ -147,6 +145,8 @@
 #undef DECLARE_REGISTER
 const Register no_reg = {Register::kCode_no_reg};
 
+static const bool kSimpleFPAliasing = true;
+
 struct X87Register {
   enum Code {
 #define REGISTER_CODE(R) kCode_##R,
@@ -164,7 +164,6 @@
     return result;
   }
 
-  bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
 
   int code() const {
@@ -174,8 +173,6 @@
 
   bool is(X87Register reg) const { return reg_code == reg.reg_code; }
 
-  const char* ToString();
-
   int reg_code;
 };
 
@@ -654,6 +651,14 @@
   void xchg_b(Register reg, const Operand& op);
   void xchg_w(Register reg, const Operand& op);
 
+  // Lock prefix
+  void lock();
+
+  // CompareExchange
+  void cmpxchg(const Operand& dst, Register src);
+  void cmpxchg_b(const Operand& dst, Register src);
+  void cmpxchg_w(const Operand& dst, Register src);
+
   // Arithmetics
   void adc(Register dst, int32_t imm32);
   void adc(Register dst, const Operand& src);
diff --git a/src/x87/builtins-x87.cc b/src/x87/builtins-x87.cc
index 7018802..0600f0d 100644
--- a/src/x87/builtins-x87.cc
+++ b/src/x87/builtins-x87.cc
@@ -16,10 +16,7 @@
 
 #define __ ACCESS_MASM(masm)
 
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
-                                CFunctionId id,
-                                BuiltinExtraArguments extra_args) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
   // ----------- S t a t e -------------
   //  -- eax                : number of arguments excluding receiver
   //  -- edi                : target
@@ -39,19 +36,11 @@
   __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   // Insert extra arguments.
-  int num_extra_args = 0;
-  if (extra_args != BuiltinExtraArguments::kNone) {
-    __ PopReturnAddressTo(ecx);
-    if (extra_args & BuiltinExtraArguments::kTarget) {
-      ++num_extra_args;
-      __ Push(edi);
-    }
-    if (extra_args & BuiltinExtraArguments::kNewTarget) {
-      ++num_extra_args;
-      __ Push(edx);
-    }
-    __ PushReturnAddressFrom(ecx);
-  }
+  const int num_extra_args = 2;
+  __ PopReturnAddressTo(ecx);
+  __ Push(edi);
+  __ Push(edx);
+  __ PushReturnAddressFrom(ecx);
 
   // JumpToExternalReference expects eax to contain the number of arguments
   // including the receiver and the extra arguments.
@@ -396,8 +385,8 @@
   __ AssertGeneratorObject(ebx);
 
   // Store input value into generator object.
-  __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOffset), eax);
-  __ RecordWriteField(ebx, JSGeneratorObject::kInputOffset, eax, ecx,
+  __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOrDebugPosOffset), eax);
+  __ RecordWriteField(ebx, JSGeneratorObject::kInputOrDebugPosOffset, eax, ecx,
                       kDontSaveFPRegs);
 
   // Store resume mode into generator object.
@@ -408,22 +397,20 @@
   __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
 
   // Flood function if we are stepping.
-  Label skip_flooding;
-  ExternalReference step_in_enabled =
-      ExternalReference::debug_step_in_enabled_address(masm->isolate());
-  __ cmpb(Operand::StaticVariable(step_in_enabled), Immediate(0));
-  __ j(equal, &skip_flooding);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Push(ebx);
-    __ Push(edx);
-    __ Push(edi);
-    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
-    __ Pop(edx);
-    __ Pop(ebx);
-    __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
-  }
-  __ bind(&skip_flooding);
+  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+  Label stepping_prepared;
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(masm->isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  __ cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
+  __ j(greater_equal, &prepare_step_in_if_stepping);
+
+  // Flood function if we need to continue stepping in the suspended generator.
+  ExternalReference debug_suspended_generator =
+      ExternalReference::debug_suspended_generator_address(masm->isolate());
+  __ cmp(ebx, Operand::StaticVariable(debug_suspended_generator));
+  __ j(equal, &prepare_step_in_suspended_generator);
+  __ bind(&stepping_prepared);
 
   // Pop return address.
   __ PopReturnAddressTo(eax);
@@ -519,6 +506,51 @@
     __ mov(eax, ebx);  // Continuation expects generator object in eax.
     __ jmp(edx);
   }
+
+  __ bind(&prepare_step_in_if_stepping);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(ebx);
+    __ Push(edx);
+    __ Push(edi);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(edx);
+    __ Pop(ebx);
+    __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+  }
+  __ jmp(&stepping_prepared);
+
+  __ bind(&prepare_step_in_suspended_generator);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(ebx);
+    __ Push(edx);
+    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+    __ Pop(edx);
+    __ Pop(ebx);
+    __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+  }
+  __ jmp(&stepping_prepared);
+}
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+                                  Register scratch2) {
+  Register args_count = scratch1;
+  Register return_pc = scratch2;
+
+  // Get the arguments + receiver count.
+  __ mov(args_count,
+         Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ mov(args_count,
+         FieldOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ leave();
+
+  // Drop receiver + arguments.
+  __ pop(return_pc);
+  __ add(esp, args_count);
+  __ push(return_pc);
 }
 
 // Generate code for entering a JS function with the interpreter.
@@ -624,18 +656,7 @@
   masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
 
   // The return value is in eax.
-
-  // Get the arguments + reciever count.
-  __ mov(ebx, Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ mov(ebx, FieldOperand(ebx, BytecodeArray::kParameterSizeOffset));
-
-  // Leave the frame (also dropping the register file).
-  __ leave();
-
-  // Drop receiver + arguments and return.
-  __ pop(ecx);
-  __ add(esp, ebx);
-  __ push(ecx);
+  LeaveInterpreterFrame(masm, ebx, ecx);
   __ ret(0);
 
   // Load debug copy of the bytecode array.
@@ -662,6 +683,31 @@
   __ jmp(ecx);
 }
 
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+  // Save the function and context for call to CompileBaseline.
+  __ mov(edi, Operand(ebp, StandardFrameConstants::kFunctionOffset));
+  __ mov(kContextRegister,
+         Operand(ebp, StandardFrameConstants::kContextOffset));
+
+  // Leave the frame before recompiling for baseline so that we don't count as
+  // an activation on the stack.
+  LeaveInterpreterFrame(masm, ebx, ecx);
+
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Push return value.
+    __ push(eax);
+
+    // Push function as argument and compile for baseline.
+    __ push(edi);
+    __ CallRuntime(Runtime::kCompileBaseline);
+
+    // Restore return value.
+    __ pop(eax);
+  }
+  __ ret(0);
+}
+
 static void Generate_InterpreterPushArgs(MacroAssembler* masm,
                                          Register array_limit) {
   // ----------- S t a t e -------------
@@ -841,13 +887,30 @@
   const int bailout_id = BailoutId::None().ToInt();
   __ cmp(temp, Immediate(Smi::FromInt(bailout_id)));
   __ j(not_equal, &loop_bottom);
+
   // Literals available?
+  Label got_literals, maybe_cleared_weakcell;
   __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
                             SharedFunctionInfo::kOffsetToPreviousLiterals));
+
+  // temp contains either a WeakCell pointing to the literals array or the
+  // literals array directly.
+  STATIC_ASSERT(WeakCell::kValueOffset == FixedArray::kLengthOffset);
+  __ JumpIfSmi(FieldOperand(temp, WeakCell::kValueOffset),
+               &maybe_cleared_weakcell);
+  // The WeakCell value is a pointer, therefore it's a valid literals array.
   __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
-  __ JumpIfSmi(temp, &gotta_call_runtime);
+  __ jmp(&got_literals);
+
+  // We have a smi. If it's 0, then we are looking at a cleared WeakCell
+  // around the literals array, and we should visit the runtime. If it's > 0,
+  // then temp already contains the literals array.
+  __ bind(&maybe_cleared_weakcell);
+  __ cmp(FieldOperand(temp, WeakCell::kValueOffset), Immediate(0));
+  __ j(equal, &gotta_call_runtime);
 
   // Save the literals in the closure.
+  __ bind(&got_literals);
   __ mov(ecx, Operand(esp, 0));
   __ mov(FieldOperand(ecx, JSFunction::kLiteralsOffset), temp);
   __ push(index);
@@ -1120,6 +1183,9 @@
 void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
                                                int field_index) {
   // ----------- S t a t e -------------
+  //  -- eax    : number of arguments
+  //  -- edi    : function
+  //  -- esi    : context
   //  -- esp[0] : return address
   //  -- esp[4] : receiver
   // -----------------------------------
@@ -1162,7 +1228,11 @@
   __ bind(&receiver_not_date);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    __ EnterFrame(StackFrame::INTERNAL);
+    __ Push(ebp);
+    __ Move(ebp, esp);
+    __ Push(esi);
+    __ Push(edi);
+    __ Push(Immediate(0));
     __ CallRuntime(Runtime::kThrowNotDateError);
   }
 }
@@ -1496,6 +1566,8 @@
 void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
   // ----------- S t a t e -------------
   //  -- eax                 : number of arguments
+  //  -- edi                 : function
+  //  -- esi                 : context
   //  -- esp[0]              : return address
   //  -- esp[(argc - n) * 8] : arg[n] (zero-based)
   //  -- esp[(argc + 1) * 8] : receiver
@@ -1523,27 +1595,32 @@
     __ mov(ebx, Operand(esp, ecx, times_pointer_size, 0));
 
     // Load the double value of the parameter into stx_1, maybe converting the
-    // parameter to a number first using the ToNumberStub if necessary.
+    // parameter to a number first using the ToNumber builtin if necessary.
     Label convert, convert_smi, convert_number, done_convert;
     __ bind(&convert);
     __ JumpIfSmi(ebx, &convert_smi);
     __ JumpIfRoot(FieldOperand(ebx, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex, &convert_number);
     {
-      // Parameter is not a Number, use the ToNumberStub to convert it.
-      FrameScope scope(masm, StackFrame::INTERNAL);
+      // Parameter is not a Number, use the ToNumber builtin to convert it.
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ Push(ebp);
+      __ Move(ebp, esp);
+      __ Push(esi);
+      __ Push(edi);
       __ SmiTag(eax);
       __ SmiTag(ecx);
       __ Push(eax);
       __ Push(ecx);
       __ Push(edx);
       __ mov(eax, ebx);
-      ToNumberStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
       __ mov(ebx, eax);
       __ Pop(edx);
       __ Pop(ecx);
       __ Pop(eax);
+      __ Pop(edi);
+      __ Pop(esi);
       {
         // Restore the double accumulator value (stX_0).
         Label restore_smi, done_restore;
@@ -1560,6 +1637,7 @@
       }
       __ SmiUntag(ecx);
       __ SmiUntag(eax);
+      __ leave();
     }
     __ jmp(&convert);
     __ bind(&convert_number);
@@ -1650,8 +1728,7 @@
   }
 
   // 2a. Convert the first argument to a number.
-  ToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
+  __ Jump(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
 
   // 2b. No arguments, return +0 (already in eax).
   __ bind(&no_arguments);
@@ -1701,8 +1778,7 @@
       __ Push(edi);
       __ Push(edx);
       __ Move(eax, ebx);
-      ToNumberStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
       __ Move(ebx, eax);
       __ Pop(edx);
       __ Pop(edi);
@@ -2601,6 +2677,81 @@
   __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
 }
 
+// static
+void Builtins::Generate_StringToNumber(MacroAssembler* masm) {
+  // The StringToNumber stub takes one argument in eax.
+  __ AssertString(eax);
+
+  // Check if string has a cached array index.
+  Label runtime;
+  __ test(FieldOperand(eax, String::kHashFieldOffset),
+          Immediate(String::kContainsCachedArrayIndexMask));
+  __ j(not_zero, &runtime, Label::kNear);
+  __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
+  __ IndexFromHash(eax, eax);
+  __ Ret();
+
+  __ bind(&runtime);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    // Push argument.
+    __ push(eax);
+    // We cannot use a tail call here because this builtin can also be called
+    // from wasm.
+    __ CallRuntime(Runtime::kStringToNumber);
+  }
+  __ Ret();
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in eax.
+  Label not_smi;
+  __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
+  __ Ret();
+  __ bind(&not_smi);
+
+  Label not_heap_number;
+  __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
+  __ j(not_equal, &not_heap_number, Label::kNear);
+  __ Ret();
+  __ bind(&not_heap_number);
+
+  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+          RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_NonNumberToNumber(MacroAssembler* masm) {
+  // The NonNumberToNumber stub takes one argument in eax.
+  __ AssertNotNumber(eax);
+
+  Label not_string;
+  __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi);
+  // eax: object
+  // edi: object map
+  __ j(above_equal, &not_string, Label::kNear);
+  __ Jump(masm->isolate()->builtins()->StringToNumber(),
+          RelocInfo::CODE_TARGET);
+  __ bind(&not_string);
+
+  Label not_oddball;
+  __ CmpInstanceType(edi, ODDBALL_TYPE);
+  __ j(not_equal, &not_oddball, Label::kNear);
+  __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
+  __ Ret();
+  __ bind(&not_oddball);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    // Push argument.
+    __ push(eax);
+    // We cannot use a tail call here because this builtin can also be called
+    // from wasm.
+    __ CallRuntime(Runtime::kToNumber);
+  }
+  __ Ret();
+}
+
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax : actual number of arguments
diff --git a/src/x87/code-stubs-x87.cc b/src/x87/code-stubs-x87.cc
index fdb97ee..7b069ac 100644
--- a/src/x87/code-stubs-x87.cc
+++ b/src/x87/code-stubs-x87.cc
@@ -22,78 +22,29 @@
 namespace v8 {
 namespace internal {
 
+#define __ ACCESS_MASM(masm)
 
-static void InitializeArrayConstructorDescriptor(
-    Isolate* isolate, CodeStubDescriptor* descriptor,
-    int constant_stack_parameter_count) {
-  // register state
-  // eax -- number of arguments
-  // edi -- function
-  // ebx -- allocation site with elements kind
-  Address deopt_handler = Runtime::FunctionForId(
-      Runtime::kArrayConstructor)->entry;
-
-  if (constant_stack_parameter_count == 0) {
-    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  } else {
-    descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  }
+void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
+  __ pop(ecx);
+  __ mov(MemOperand(esp, eax, times_4, 0), edi);
+  __ push(edi);
+  __ push(ebx);
+  __ push(ecx);
+  __ add(eax, Immediate(3));
+  __ TailCallRuntime(Runtime::kNewArray);
 }
 
-
-static void InitializeInternalArrayConstructorDescriptor(
-    Isolate* isolate, CodeStubDescriptor* descriptor,
-    int constant_stack_parameter_count) {
-  // register state
-  // eax -- number of arguments
-  // edi -- constructor function
-  Address deopt_handler = Runtime::FunctionForId(
-      Runtime::kInternalArrayConstructor)->entry;
-
-  if (constant_stack_parameter_count == 0) {
-    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  } else {
-    descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  }
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
   descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
 }
 
-void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+void FastFunctionBindStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
+  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
+  descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
 }
 
-
-void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-
 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                                ExternalReference miss) {
   // Update the static counter each time a new code stub is generated.
@@ -381,7 +332,6 @@
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
                                           &miss,  // When index out of range.
-                                          STRING_INDEX_IS_ARRAY_INDEX,
                                           RECEIVER_IS_STRING);
   char_at_generator.GenerateFast(masm);
   __ ret(0);
@@ -1178,6 +1128,7 @@
   // edi : the function to call
   Isolate* isolate = masm->isolate();
   Label initialize, done, miss, megamorphic, not_array_function;
+  Label done_increment_count, done_initialize_count;
 
   // Load the cache state into ecx.
   __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
@@ -1190,7 +1141,7 @@
   // type-feedback-vector.h).
   Label check_allocation_site;
   __ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
-  __ j(equal, &done, Label::kFar);
+  __ j(equal, &done_increment_count, Label::kFar);
   __ CompareRoot(ecx, Heap::kmegamorphic_symbolRootIndex);
   __ j(equal, &done, Label::kFar);
   __ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
@@ -1213,7 +1164,7 @@
   __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
   __ cmp(edi, ecx);
   __ j(not_equal, &megamorphic);
-  __ jmp(&done, Label::kFar);
+  __ jmp(&done_increment_count, Label::kFar);
 
   __ bind(&miss);
 
@@ -1242,11 +1193,25 @@
   // slot.
   CreateAllocationSiteStub create_stub(isolate);
   CallStubInRecordCallTarget(masm, &create_stub);
-  __ jmp(&done);
+  __ jmp(&done_initialize_count);
 
   __ bind(&not_array_function);
   CreateWeakCellStub weak_cell_stub(isolate);
   CallStubInRecordCallTarget(masm, &weak_cell_stub);
+  __ bind(&done_initialize_count);
+
+  // Initialize the call counter.
+  __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+                      FixedArray::kHeaderSize + kPointerSize),
+         Immediate(Smi::FromInt(1)));
+  __ jmp(&done);
+
+  __ bind(&done_increment_count);
+  // Increment the call count for monomorphic function calls.
+  __ add(FieldOperand(ebx, edx, times_half_pointer_size,
+                      FixedArray::kHeaderSize + kPointerSize),
+         Immediate(Smi::FromInt(1)));
+
   __ bind(&done);
 }
 
@@ -1310,7 +1275,7 @@
   // Increment the call count for monomorphic function calls.
   __ add(FieldOperand(ebx, edx, times_half_pointer_size,
                       FixedArray::kHeaderSize + kPointerSize),
-         Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+         Immediate(Smi::FromInt(1)));
 
   __ mov(ebx, ecx);
   __ mov(edx, edi);
@@ -1358,7 +1323,7 @@
   // Increment the call count for monomorphic function calls.
   __ add(FieldOperand(ebx, edx, times_half_pointer_size,
                       FixedArray::kHeaderSize + kPointerSize),
-         Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+         Immediate(Smi::FromInt(1)));
 
   __ bind(&call_function);
   __ Set(eax, argc);
@@ -1429,7 +1394,7 @@
   // Initialize the call counter.
   __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
                       FixedArray::kHeaderSize + kPointerSize),
-         Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+         Immediate(Smi::FromInt(1)));
 
   // Store the function. Use a stub since we need a frame for allocation.
   // ebx - vector
@@ -1483,7 +1448,7 @@
   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
   // It is important that the store buffer overflow stubs are generated first.
-  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
   CreateWeakCellStub::GenerateAheadOfTime(isolate);
   BinaryOpICStub::GenerateAheadOfTime(isolate);
@@ -1824,13 +1789,7 @@
   }
   __ push(object_);
   __ push(index_);  // Consumed by runtime conversion function.
-  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
-    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
-  } else {
-    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
-    // NumberToSmi discards numbers that are not exact integers.
-    __ CallRuntime(Runtime::kNumberToSmi);
-  }
+  __ CallRuntime(Runtime::kNumberToSmi);
   if (!index_.is(eax)) {
     // Save the conversion result before the pop instructions below
     // have a chance to overwrite it.
@@ -2163,77 +2122,12 @@
   // ecx: sub string length (smi)
   // edx: from index (smi)
   StringCharAtGenerator generator(eax, edx, ecx, eax, &runtime, &runtime,
-                                  &runtime, STRING_INDEX_IS_NUMBER,
-                                  RECEIVER_IS_STRING);
+                                  &runtime, RECEIVER_IS_STRING);
   generator.GenerateFast(masm);
   __ ret(3 * kPointerSize);
   generator.SkipSlow(masm, &runtime);
 }
 
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in eax.
-  Label not_smi;
-  __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
-  __ Ret();
-  __ bind(&not_smi);
-
-  Label not_heap_number;
-  __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
-  __ j(not_equal, &not_heap_number, Label::kNear);
-  __ Ret();
-  __ bind(&not_heap_number);
-
-  NonNumberToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-}
-
-void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
-  // The NonNumberToNumber stub takes one argument in eax.
-  __ AssertNotNumber(eax);
-
-  Label not_string;
-  __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi);
-  // eax: object
-  // edi: object map
-  __ j(above_equal, &not_string, Label::kNear);
-  StringToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_string);
-
-  Label not_oddball;
-  __ CmpInstanceType(edi, ODDBALL_TYPE);
-  __ j(not_equal, &not_oddball, Label::kNear);
-  __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
-  __ Ret();
-  __ bind(&not_oddball);
-
-  __ pop(ecx);   // Pop return address.
-  __ push(eax);  // Push argument.
-  __ push(ecx);  // Push return address.
-  __ TailCallRuntime(Runtime::kToNumber);
-}
-
-void StringToNumberStub::Generate(MacroAssembler* masm) {
-  // The StringToNumber stub takes one argument in eax.
-  __ AssertString(eax);
-
-  // Check if string has a cached array index.
-  Label runtime;
-  __ test(FieldOperand(eax, String::kHashFieldOffset),
-          Immediate(String::kContainsCachedArrayIndexMask));
-  __ j(not_zero, &runtime, Label::kNear);
-  __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
-  __ IndexFromHash(eax, eax);
-  __ Ret();
-
-  __ bind(&runtime);
-  __ PopReturnAddressTo(ecx);     // Pop return address.
-  __ Push(eax);                   // Push argument.
-  __ PushReturnAddressFrom(ecx);  // Push return address.
-  __ TailCallRuntime(Runtime::kStringToNumber);
-}
-
 void ToStringStub::Generate(MacroAssembler* masm) {
   // The ToString stub takes one argument in eax.
   Label is_number;
@@ -2440,7 +2334,7 @@
   // Load ecx with the allocation site.  We stick an undefined dummy value here
   // and replace it with the real allocation site later when we instantiate this
   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
-  __ mov(ecx, handle(isolate()->heap()->undefined_value()));
+  __ mov(ecx, isolate()->factory()->undefined_value());
 
   // Make sure that we actually patched the allocation site.
   if (FLAG_debug_code) {
@@ -3241,14 +3135,14 @@
 
 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  LoadICStub stub(isolate(), state());
+  LoadICStub stub(isolate());
   stub.GenerateForTrampoline(masm);
 }
 
 
 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  KeyedLoadICStub stub(isolate(), state());
+  KeyedLoadICStub stub(isolate());
   stub.GenerateForTrampoline(masm);
 }
 
@@ -4013,17 +3907,14 @@
   }
 }
 
-void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
       isolate);
   ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
       isolate);
-  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
-      isolate);
-}
+  ArrayNArgumentsConstructorStub stub(isolate);
+  stub.GetCode();
 
-void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
-    Isolate* isolate) {
   ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
   for (int i = 0; i < 2; i++) {
     // For internal arrays we only need a few things
@@ -4031,8 +3922,6 @@
     stubh1.GetCode();
     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
     stubh2.GetCode();
-    InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
-    stubh3.GetCode();
   }
 }
 
@@ -4050,13 +3939,15 @@
     CreateArrayDispatchOneArgument(masm, mode);
 
     __ bind(&not_one_case);
-    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+    ArrayNArgumentsConstructorStub stub(masm->isolate());
+    __ TailCallStub(&stub);
   } else if (argument_count() == NONE) {
     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   } else if (argument_count() == ONE) {
     CreateArrayDispatchOneArgument(masm, mode);
   } else if (argument_count() == MORE_THAN_ONE) {
-    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+    ArrayNArgumentsConstructorStub stub(masm->isolate());
+    __ TailCallStub(&stub);
   } else {
     UNREACHABLE();
   }
@@ -4166,7 +4057,7 @@
   __ TailCallStub(&stub1);
 
   __ bind(&not_one_case);
-  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+  ArrayNArgumentsConstructorStub stubN(isolate());
   __ TailCallStub(&stubN);
 }
 
@@ -4475,8 +4366,11 @@
     __ mov(eax, edi);
     __ Ret();
 
-    // Fall back to %AllocateInNewSpace.
+    // Fall back to %AllocateInNewSpace (if not too big).
+    Label too_big_for_new_space;
     __ bind(&allocate);
+    __ cmp(ecx, Immediate(Page::kMaxRegularHeapObjectSize));
+    __ j(greater, &too_big_for_new_space);
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
       __ SmiTag(ecx);
@@ -4489,6 +4383,22 @@
       __ Pop(eax);
     }
     __ jmp(&done_allocate);
+
+    // Fall back to %NewRestParameter.
+    __ bind(&too_big_for_new_space);
+    __ PopReturnAddressTo(ecx);
+    // We reload the function from the caller frame due to register pressure
+    // within this stub. This is the slow path, hence reloading is preferable.
+    if (skip_stub_frame()) {
+      // For Ignition we need to skip the handler/stub frame to reach the
+      // JavaScript frame for the function.
+      __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+      __ Push(Operand(edx, StandardFrameConstants::kFunctionOffset));
+    } else {
+      __ Push(Operand(ebp, StandardFrameConstants::kFunctionOffset));
+    }
+    __ PushReturnAddressFrom(ecx);
+    __ TailCallRuntime(Runtime::kNewRestParameter);
   }
 }
 
@@ -4843,8 +4753,11 @@
   __ mov(eax, edi);
   __ Ret();
 
-  // Fall back to %AllocateInNewSpace.
+  // Fall back to %AllocateInNewSpace (if not too big).
+  Label too_big_for_new_space;
   __ bind(&allocate);
+  __ cmp(ecx, Immediate(Page::kMaxRegularHeapObjectSize));
+  __ j(greater, &too_big_for_new_space);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ SmiTag(ecx);
@@ -4857,39 +4770,24 @@
     __ Pop(eax);
   }
   __ jmp(&done_allocate);
-}
 
-void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
-  Register context_reg = esi;
-  Register slot_reg = ebx;
-  Register result_reg = eax;
-  Label slow_case;
-
-  // Go up context chain to the script context.
-  for (int i = 0; i < depth(); ++i) {
-    __ mov(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
-    context_reg = result_reg;
+  // Fall back to %NewStrictArguments.
+  __ bind(&too_big_for_new_space);
+  __ PopReturnAddressTo(ecx);
+  // We reload the function from the caller frame due to register pressure
+  // within this stub. This is the slow path, hence reloading is preferable.
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
+    __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+    __ Push(Operand(edx, StandardFrameConstants::kFunctionOffset));
+  } else {
+    __ Push(Operand(ebp, StandardFrameConstants::kFunctionOffset));
   }
-
-  // Load the PropertyCell value at the specified slot.
-  __ mov(result_reg, ContextOperand(context_reg, slot_reg));
-  __ mov(result_reg, FieldOperand(result_reg, PropertyCell::kValueOffset));
-
-  // Check that value is not the_hole.
-  __ CompareRoot(result_reg, Heap::kTheHoleValueRootIndex);
-  __ j(equal, &slow_case, Label::kNear);
-  __ Ret();
-
-  // Fallback to the runtime.
-  __ bind(&slow_case);
-  __ SmiTag(slot_reg);
-  __ Pop(result_reg);  // Pop return address.
-  __ Push(slot_reg);
-  __ Push(result_reg);  // Push return address.
-  __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+  __ PushReturnAddressFrom(ecx);
+  __ TailCallRuntime(Runtime::kNewStrictArguments);
 }
 
-
 void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
   Register context_reg = esi;
   Register slot_reg = ebx;
diff --git a/src/x87/code-stubs-x87.h b/src/x87/code-stubs-x87.h
index 39a4603..6290cfe 100644
--- a/src/x87/code-stubs-x87.h
+++ b/src/x87/code-stubs-x87.h
@@ -298,8 +298,8 @@
                                   Register r2,
                                   Register r3) {
       for (int i = 0; i < Register::kNumRegisters; i++) {
-        Register candidate = Register::from_code(i);
-        if (candidate.IsAllocatable()) {
+        if (RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(i)) {
+          Register candidate = Register::from_code(i);
           if (candidate.is(ecx)) continue;
           if (candidate.is(r1)) continue;
           if (candidate.is(r2)) continue;
diff --git a/src/x87/codegen-x87.cc b/src/x87/codegen-x87.cc
index 8112d11..5cda23d 100644
--- a/src/x87/codegen-x87.cc
+++ b/src/x87/codegen-x87.cc
@@ -33,10 +33,6 @@
 
 #define __ masm.
 
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
-  return nullptr;
-}
-
 
 UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
   size_t actual_size;
@@ -269,6 +265,7 @@
 
   __ push(eax);
   __ push(ebx);
+  __ push(esi);
 
   __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));
 
@@ -301,8 +298,9 @@
 
   // Call into runtime if GC is required.
   __ bind(&gc_required);
+
   // Restore registers before jumping into runtime.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ pop(esi);
   __ pop(ebx);
   __ pop(eax);
   __ jmp(fail);
@@ -338,12 +336,11 @@
   __ sub(edi, Immediate(Smi::FromInt(1)));
   __ j(not_sign, &loop);
 
+  // Restore registers.
+  __ pop(esi);
   __ pop(ebx);
   __ pop(eax);
 
-  // Restore esi.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
   __ bind(&only_change_map);
   // eax: value
   // ebx: target map
diff --git a/src/x87/deoptimizer-x87.cc b/src/x87/deoptimizer-x87.cc
index 9d4645e..15dabb9 100644
--- a/src/x87/deoptimizer-x87.cc
+++ b/src/x87/deoptimizer-x87.cc
@@ -277,8 +277,7 @@
   }
 
   int double_regs_offset = FrameDescription::double_registers_offset();
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   // Fill in the double input registers.
   for (int i = 0; i < X87Register::kMaxNumAllocatableRegisters; ++i) {
     int code = config->GetAllocatableDoubleCode(i);
diff --git a/src/x87/disasm-x87.cc b/src/x87/disasm-x87.cc
index 2a90df9..be9167b 100644
--- a/src/x87/disasm-x87.cc
+++ b/src/x87/disasm-x87.cc
@@ -920,6 +920,10 @@
       return "shrd";  // 3-operand version.
     case 0xAB:
       return "bts";
+    case 0xB0:
+      return "cmpxchg_b";
+    case 0xB1:
+      return "cmpxchg";
     case 0xBC:
       return "bsf";
     case 0xBD:
@@ -943,7 +947,11 @@
   } else if (*data == 0x2E /*cs*/) {
     branch_hint = "predicted not taken";
     data++;
+  } else if (*data == 0xF0 /*lock*/) {
+    AppendToBuffer("lock ");
+    data++;
   }
+
   bool processed = true;  // Will be set to false if the current instruction
                           // is not in 'instructions' table.
   const InstructionDesc& idesc = instruction_table_->Get(*data);
@@ -1162,6 +1170,24 @@
             } else {
               AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
             }
+          } else if (f0byte == 0xB0) {
+            // cmpxchg_b
+            data += 2;
+            AppendToBuffer("%s ", f0mnem);
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            data += PrintRightOperand(data);
+            AppendToBuffer(",%s", NameOfByteCPURegister(regop));
+          } else if (f0byte == 0xB1) {
+            // cmpxchg
+            data += 2;
+            data += PrintOperands(f0mnem, OPER_REG_OP_ORDER, data);
+          } else if (f0byte == 0xBC) {
+            data += 2;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("%s %s,", f0mnem, NameOfCPURegister(regop));
+            data += PrintRightOperand(data);
           } else if (f0byte == 0xBD) {
             data += 2;
             int mod, regop, rm;
@@ -1272,9 +1298,8 @@
           data++;
           int mod, regop, rm;
           get_modrm(*data, &mod, &regop, &rm);
-          AppendToBuffer("xchg_w ");
+          AppendToBuffer("xchg_w %s,", NameOfCPURegister(regop));
           data += PrintRightOperand(data);
-          AppendToBuffer(",%s", NameOfCPURegister(regop));
         } else if (*data == 0x89) {
           data++;
           int mod, regop, rm;
@@ -1513,6 +1538,9 @@
                            NameOfXMMRegister(regop),
                            NameOfXMMRegister(rm));
             data++;
+          } else if (*data == 0xB1) {
+            data++;
+            data += PrintOperands("cmpxchg_w", OPER_REG_OP_ORDER, data);
           } else {
             UnimplementedInstruction();
           }
@@ -1752,7 +1780,7 @@
 
 
 const char* NameConverter::NameOfAddress(byte* addr) const {
-  v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+  v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
   return tmp_buffer_.start();
 }
 
@@ -1815,7 +1843,7 @@
     buffer[0] = '\0';
     byte* prev_pc = pc;
     pc += d.InstructionDecode(buffer, pc);
-    fprintf(f, "%p", prev_pc);
+    fprintf(f, "%p", static_cast<void*>(prev_pc));
     fprintf(f, "    ");
 
     for (byte* bp = prev_pc; bp < pc; bp++) {
diff --git a/src/x87/interface-descriptors-x87.cc b/src/x87/interface-descriptors-x87.cc
index 260d871..99664dc 100644
--- a/src/x87/interface-descriptors-x87.cc
+++ b/src/x87/interface-descriptors-x87.cc
@@ -11,6 +11,14 @@
 
 const Register CallInterfaceDescriptor::ContextRegister() { return esi; }
 
+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
+    CallInterfaceDescriptorData* data, int register_parameter_count) {
+  const Register default_stub_registers[] = {eax, ebx, ecx, edx, edi};
+  CHECK_LE(static_cast<size_t>(register_parameter_count),
+           arraysize(default_stub_registers));
+  data->InitializePlatformSpecific(register_parameter_count,
+                                   default_stub_registers);
+}
 
 const Register LoadDescriptor::ReceiverRegister() { return edx; }
 const Register LoadDescriptor::NameRegister() { return ecx; }
@@ -44,9 +52,6 @@
 const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
 
 
-const Register LoadGlobalViaContextDescriptor::SlotRegister() { return ebx; }
-
-
 const Register StoreGlobalViaContextDescriptor::SlotRegister() { return ebx; }
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
 
@@ -68,8 +73,6 @@
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
 
-const Register HasPropertyDescriptor::ObjectRegister() { return eax; }
-const Register HasPropertyDescriptor::KeyRegister() { return ebx; }
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -257,43 +260,27 @@
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
   // eax -- number of arguments
   // edi -- function
   // ebx -- allocation site with elements kind
-  Register registers[] = {edi, ebx};
+  Register registers[] = {edi, ebx, eax};
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  // stack param count needs (constructor pointer, and single argument)
-  Register registers[] = {edi, ebx, eax};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void InternalArrayConstructorConstantArgCountDescriptor::
-    InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
   // register state
   // eax -- number of arguments
   // edi -- function
-  Register registers[] = {edi};
+  // ebx -- allocation site with elements kind
+  Register registers[] = {edi, ebx, eax};
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
-void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // stack param count needs (constructor pointer, and single argument)
-  Register registers[] = {edi, eax};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastArrayPushDescriptor::InitializePlatformSpecific(
+void VarArgFunctionDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // stack param count needs (arg count)
   Register registers[] = {eax};
diff --git a/src/x87/macro-assembler-x87.cc b/src/x87/macro-assembler-x87.cc
index 3cee0ea..ef96912 100644
--- a/src/x87/macro-assembler-x87.cc
+++ b/src/x87/macro-assembler-x87.cc
@@ -1043,8 +1043,8 @@
 
 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
   mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  mov(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
-  mov(vector, FieldOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+  mov(vector, FieldOperand(vector, JSFunction::kLiteralsOffset));
+  mov(vector, FieldOperand(vector, LiteralsArray::kFeedbackVectorOffset));
 }
 
 
@@ -2302,10 +2302,11 @@
                                              const ParameterCount& expected,
                                              const ParameterCount& actual) {
   Label skip_flooding;
-  ExternalReference step_in_enabled =
-      ExternalReference::debug_step_in_enabled_address(isolate());
-  cmpb(Operand::StaticVariable(step_in_enabled), Immediate(0));
-  j(equal, &skip_flooding);
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
+  j(less, &skip_flooding);
   {
     FrameScope frame(this,
                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -2641,7 +2642,7 @@
 
 
 void MacroAssembler::Move(Register dst, const Immediate& x) {
-  if (x.is_zero()) {
+  if (x.is_zero() && RelocInfo::IsNone(x.rmode_)) {
     xor_(dst, dst);  // Shorter than mov of 32-bit immediate 0.
   } else {
     mov(dst, x);
diff --git a/src/zone.h b/src/zone.h
index fa21155..29055cb 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -8,9 +8,9 @@
 #include <limits>
 
 #include "src/base/accounting-allocator.h"
+#include "src/base/hashmap.h"
 #include "src/base/logging.h"
 #include "src/globals.h"
-#include "src/hashmap.h"
 #include "src/list.h"
 #include "src/splay-tree.h"
 
@@ -244,8 +244,7 @@
   void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
 };
 
-
-typedef TemplateHashMapImpl<ZoneAllocationPolicy> ZoneHashMap;
+typedef base::TemplateHashMapImpl<ZoneAllocationPolicy> ZoneHashMap;
 
 }  // namespace internal
 }  // namespace v8